id stringlengths 28 33 | content stringlengths 14 265k ⌀ | max_stars_repo_path stringlengths 49 55 |
|---|---|---|
crossvul-python_data_bad_4360_0 | import hashlib
import logging
import os
import warnings
from base64 import b64encode
from json import JSONDecodeError
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union
from typing import cast
from urllib.parse import parse_qs
from urllib.parse import urlparse
from jwkest import BadSyntax
from jwkest import as_bytes
from jwkest import jwe
from jwkest import jws
from jwkest import jwt
from jwkest.jwe import JWE
from requests import ConnectionError
from oic import oauth2
from oic import rndstr
from oic.exception import AccessDenied
from oic.exception import AuthnToOld
from oic.exception import AuthzError
from oic.exception import CommunicationError
from oic.exception import MissingParameter
from oic.exception import ParameterError
from oic.exception import PyoidcError
from oic.exception import RegistrationError
from oic.exception import RequestError
from oic.exception import SubMismatch
from oic.oauth2 import HTTP_ARGS
from oic.oauth2 import authz_error
from oic.oauth2.consumer import ConfigurationError
from oic.oauth2.exception import MissingRequiredAttribute
from oic.oauth2.exception import OtherError
from oic.oauth2.exception import ParseError
from oic.oauth2.message import ErrorResponse
from oic.oauth2.message import Message
from oic.oauth2.message import MessageFactory
from oic.oauth2.util import get_or_post
from oic.oic.message import SCOPE2CLAIMS
from oic.oic.message import AccessTokenResponse
from oic.oic.message import AuthorizationErrorResponse
from oic.oic.message import AuthorizationRequest
from oic.oic.message import AuthorizationResponse
from oic.oic.message import Claims
from oic.oic.message import ClaimsRequest
from oic.oic.message import ClientRegistrationErrorResponse
from oic.oic.message import EndSessionRequest
from oic.oic.message import IdToken
from oic.oic.message import JasonWebToken
from oic.oic.message import OIDCMessageFactory
from oic.oic.message import OpenIDRequest
from oic.oic.message import OpenIDSchema
from oic.oic.message import RefreshSessionRequest
from oic.oic.message import RegistrationRequest
from oic.oic.message import RegistrationResponse
from oic.oic.message import TokenErrorResponse
from oic.oic.message import UserInfoErrorResponse
from oic.oic.message import UserInfoRequest
from oic.utils import time_util
from oic.utils.http_util import Response
from oic.utils.keyio import KeyJar
from oic.utils.sanitize import sanitize
from oic.utils.settings import OicClientSettings
from oic.utils.settings import OicServerSettings
from oic.utils.settings import PyoidcSettings
from oic.utils.webfinger import OIC_ISSUER
from oic.utils.webfinger import WebFinger
__author__ = "rohe0002"

logger = logging.getLogger(__name__)

# Endpoints an OIDC provider may expose; the Client gets one attribute per
# entry (initialised to "").
ENDPOINTS = [
    "authorization_endpoint",
    "token_endpoint",
    "userinfo_endpoint",
    "refresh_session_endpoint",
    "end_session_endpoint",
    "registration_endpoint",
    "check_id_endpoint",
]

# For each response message type: the error message classes a provider may
# return instead of a successful response.
RESPONSE2ERROR: Dict[str, List] = {
    "AuthorizationResponse": [AuthorizationErrorResponse, TokenErrorResponse],
    "AccessTokenResponse": [TokenErrorResponse],
    "IdToken": [ErrorResponse],
    "RegistrationResponse": [ClientRegistrationErrorResponse],
    "OpenIDSchema": [UserInfoErrorResponse],
}

# Which provider endpoint each request message type is sent to.
REQUEST2ENDPOINT = {
    "AuthorizationRequest": "authorization_endpoint",
    "OpenIDRequest": "authorization_endpoint",
    "AccessTokenRequest": "token_endpoint",
    "RefreshAccessTokenRequest": "token_endpoint",
    "UserInfoRequest": "userinfo_endpoint",
    "CheckSessionRequest": "check_session_endpoint",
    "CheckIDRequest": "check_id_endpoint",
    "EndSessionRequest": "end_session_endpoint",
    "RefreshSessionRequest": "refresh_session_endpoint",
    "RegistrationRequest": "registration_endpoint",
    "RotateSecret": "registration_endpoint",
    # --- plain OAuth2 requests ---
    "ResourceRequest": "resource_endpoint",
    "TokenIntrospectionRequest": "introspection_endpoint",
    "TokenRevocationRequest": "revocation_endpoint",
    "ROPCAccessTokenRequest": "token_endpoint",
}

# -----------------------------------------------------------------------------
# Client assertion / grant type URNs (RFC 7523, RFC 7522).
JWT_BEARER = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
SAML2_BEARER_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:saml2-bearer"

# This should probably be part of the configuration
MAX_AUTHENTICATION_AGE = 86400  # seconds (24h)

# Default signing algorithm per usage context.
DEF_SIGN_ALG = {
    "id_token": "RS256",
    "openid_request_object": "RS256",
    "client_secret_jwt": "HS256",
    "private_key_jwt": "RS256",
}

# -----------------------------------------------------------------------------
# Known orderings of Authentication Context Class References, weakest first.
ACR_LISTS = [["0", "1", "2", "3", "4"]]
def verify_acr_level(req, level):
    """Check that the achieved ACR *level* satisfies the requested ACR *req*.

    :param req: requested ACR claim spec (None, or dict possibly with 'values')
    :param level: the ACR level actually achieved
    :return: *level* when acceptable
    :raises AccessDenied: when the achieved level does not satisfy the request
    """
    # No specific request -> anything is acceptable.
    if req is None:
        return level
    if "values" not in req:
        # Only 'essential'/optional markers present; accept the level as is.
        return level
    for requested in req["values"]:
        for ordering in ACR_LISTS:
            try:
                if ordering.index(requested) <= ordering.index(level):
                    return level
            except ValueError:
                # Value not present in this ordering; try the next one.
                continue
    raise AccessDenied("", req)
def deser_id_token(inst, txt=""):
    """Deserialize a JWT string into an IdToken using *inst*'s key jar.

    :param inst: object holding a ``keyjar`` attribute
    :param txt: the JWT; an empty value yields None
    :return: IdToken instance or None
    """
    if not txt:
        return None
    return IdToken().from_jwt(txt, keyjar=inst.keyjar)
# -----------------------------------------------------------------------------
def make_openid_request(
    arq,
    keys=None,
    userinfo_claims=None,
    idtoken_claims=None,
    request_object_signing_alg=None,
    **kwargs,
):
    """
    Construct the specification of what I want returned.

    The request will be signed.

    :param arq: The Authorization request
    :param keys: Keys to use for signing/encrypting
    :param userinfo_claims: UserInfo claims
    :param idtoken_claims: IdToken claims
    :param request_object_signing_alg: Which signing algorithm to use
    :return: JWT encoded OpenID request
    """
    # Copy every parameter the OpenID request knows about from the
    # authorization request.
    args = {}
    for name in OpenIDRequest.c_param.keys():
        try:
            args[name] = arq[name]
        except KeyError:
            continue

    # These are carried as space separated strings inside the request object.
    for attr in ("scope", "response_type"):
        if attr in args:
            args[attr] = " ".join(args[attr])

    claims_args = {}
    if userinfo_claims is not None:
        claims_args["userinfo"] = Claims(**userinfo_claims)
    if idtoken_claims is not None:
        claims_args["id_token"] = Claims(**idtoken_claims)
    if claims_args:
        args["claims"] = ClaimsRequest(**claims_args)

    return OpenIDRequest(**args).to_jwt(
        key=keys, algorithm=request_object_signing_alg
    )
class Token(oauth2.Token):
    # OIDC token; currently identical to the plain OAuth2 token.
    pass
class Grant(oauth2.Grant):
    """Authorization grant that stores OIDC tokens."""

    _authz_resp = AuthorizationResponse
    _acc_resp = AccessTokenResponse
    _token_class = Token

    def add_token(self, resp):
        """Store a token built from *resp* if it carries an access or ID token."""
        token = self._token_class(resp)
        if token.access_token:
            self.tokens.append(token)
            return
        # No access token: keep the token anyway when an ID token is present.
        if getattr(token, "id_token", None):
            self.tokens.append(token)
# Mapping from client registration preferences to the provider-metadata
# fields that advertise support for them.
PREFERENCE2PROVIDER = {
    "request_object_signing_alg": "request_object_signing_alg_values_supported",
    "request_object_encryption_alg": "request_object_encryption_alg_values_supported",
    "request_object_encryption_enc": "request_object_encryption_enc_values_supported",
    "userinfo_signed_response_alg": "userinfo_signing_alg_values_supported",
    "userinfo_encrypted_response_alg": "userinfo_encryption_alg_values_supported",
    "userinfo_encrypted_response_enc": "userinfo_encryption_enc_values_supported",
    "id_token_signed_response_alg": "id_token_signing_alg_values_supported",
    "id_token_encrypted_response_alg": "id_token_encryption_alg_values_supported",
    "id_token_encrypted_response_enc": "id_token_encryption_enc_values_supported",
    "default_acr_values": "acr_values_supported",
    "subject_type": "subject_types_supported",
    "token_endpoint_auth_method": "token_endpoint_auth_methods_supported",
    "token_endpoint_auth_signing_alg": "token_endpoint_auth_signing_alg_values_supported",
    "response_types": "response_types_supported",
    "grant_types": "grant_types_supported",
}

# Inverse mapping: provider-metadata field -> client preference.
# (Was dict([...]) around a list comprehension; a dict comprehension avoids
# building the intermediate list.)
PROVIDER2PREFERENCE = {v: k for k, v in PREFERENCE2PROVIDER.items()}

# Defaults assumed when the provider metadata does not state a value.
PROVIDER_DEFAULT = {
    "token_endpoint_auth_method": "client_secret_basic",
    "id_token_signed_response_alg": "RS256",
}

# Templates for per-usage ("userinfo"/"id_token") registration parameter names.
PARAMMAP = {
    "sign": "%s_signed_response_alg",
    "alg": "%s_encrypted_response_alg",
    "enc": "%s_encrypted_response_enc",
}

# response_type combination (sorted, space separated) -> grant types needed.
rt2gt = {
    "code": ["authorization_code"],
    "id_token": ["implicit"],
    "id_token token": ["implicit"],
    "code id_token": ["authorization_code", "implicit"],
    "code token": ["authorization_code", "implicit"],
    "code id_token token": ["authorization_code", "implicit"],
}
def response_types_to_grant_types(resp_types, **kwargs):
    """Map response types onto the grant types needed to obtain them.

    :param resp_types: iterable of response_type strings (e.g. "code id_token")
    :param kwargs: may contain an explicit ``grant_types`` list to include
    :return: list of grant types
    :raises ValueError: for an unknown response type combination
    """
    grant_types = set(kwargs.get("grant_types", []))
    for rtype in resp_types:
        # Normalise: the lookup table keys are sorted, space separated tokens.
        key = " ".join(sorted(rtype.split(" ")))
        try:
            grant_types.update(rt2gt[key])
        except KeyError:
            raise ValueError("No such response type combination: {}".format(resp_types))
    return list(grant_types)
def claims_match(value, claimspec):
    """
    Implement matching according to section 5.5.1 of
    http://openid.net/specs/openid-connect-core-1_0.html.

    The lack of value is not checked here.
    Also the text doesn't prohibit having both 'value' and 'values'.

    :param value: single value or list of values
    :param claimspec: None or dictionary with 'essential', 'value' or 'values'
        as key
    :return: Boolean
    """
    if claimspec is None:
        # No spec at all matches anything.
        return True

    for key, spec_val in claimspec.items():
        if key == "value" and value == spec_val:
            return True
        if key == "values" and value in spec_val:
            return True
        # 'essential' does not influence the matching itself.

    # Nothing matched; a spec consisting solely of 'essential' still matches.
    return list(claimspec.keys()) == ["essential"]
class Client(oauth2.Client):
_endpoints = ENDPOINTS
    def __init__(
        self,
        client_id=None,
        client_prefs=None,
        client_authn_method=None,
        keyjar=None,
        verify_ssl=None,
        config=None,
        client_cert=None,
        requests_dir="requests",
        message_factory: Type[MessageFactory] = OIDCMessageFactory,
        settings: PyoidcSettings = None,
    ):
        """
        Initialize the instance.

        Keyword Args:
            settings
                Instance of :class:`OicClientSettings` with configuration options.
                Currently used settings are:
                 - verify_ssl
                 - client_cert
                 - timeout
        """
        self.settings = settings or OicClientSettings()
        # Legacy keyword arguments override the settings object but warn.
        if verify_ssl is not None:
            warnings.warn(
                "`verify_ssl` is deprecated, please use `settings` instead if you need to set a non-default value.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.settings.verify_ssl = verify_ssl
        if client_cert is not None:
            warnings.warn(
                "`client_cert` is deprecated, please use `settings` instead if you need to set a non-default value.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.settings.client_cert = client_cert

        oauth2.Client.__init__(
            self,
            client_id,
            client_authn_method=client_authn_method,
            keyjar=keyjar,
            config=config,
            message_factory=message_factory,
            settings=self.settings,
        )

        self.file_store = "./file/"
        self.file_uri = "http://localhost/"
        self.base_url = ""

        # OpenID connect specific endpoints; filled in from provider info.
        for endpoint in ENDPOINTS:
            setattr(self, endpoint, "")

        # ID tokens received, keyed by state.
        self.id_token: Dict[str, Token] = {}
        self.log = None

        self.request2endpoint = REQUEST2ENDPOINT
        self.response2error = RESPONSE2ERROR
        self.grant_class = Grant
        self.token_class = Token
        self.provider_info = Message()
        self.registration_response: RegistrationResponse = RegistrationResponse()
        self.client_prefs = client_prefs or {}
        # What the client and provider agreed on (see match_preferences).
        self.behaviour: Dict[str, Any] = {}
        self.scope = ["openid"]

        # WebFinger instance used for issuer discovery.
        self.wf = WebFinger(OIC_ISSUER)
        self.wf.httpd = self
        self.allow = {}
        self.post_logout_redirect_uris: List[str] = []
        self.registration_expires = 0
        self.registration_access_token = None
        self.id_token_max_age = 0

        # Default key by kid for different key types
        # For instance {'sig': {"RSA":"abc"}}
        self.kid = {"sig": {}, "enc": {}}
        # Directory where request objects are written (see request_uri flow).
        self.requests_dir = requests_dir
    def _get_id_token(self, **kwargs):
        """Return a raw ID token (JWT string) for the request being built.

        An explicitly supplied ``id_token`` kwarg trumps everything else;
        otherwise the tokens of the matching grant are searched, optionally
        filtered by ``scope``.
        """
        try:
            return kwargs["id_token"]
        except KeyError:
            grant = self.get_grant(**kwargs)

            if grant:
                try:
                    _scope = kwargs["scope"]
                except KeyError:
                    _scope = None

                for token in grant.tokens:
                    if token.scope and _scope:
                        # The token must cover every requested scope value.
                        flag = True
                        for item in _scope:
                            if item not in token.scope:
                                flag = False
                                break
                        # NOTE(review): a single scope mismatch aborts the whole
                        # search ('break', not 'continue') -- confirm intended.
                        if not flag:
                            break
                    if token.id_token:
                        return token.id_token.jwt

        return None
def request_object_encryption(self, msg, **kwargs):
try:
encalg = kwargs["request_object_encryption_alg"]
except KeyError:
try:
encalg = self.behaviour["request_object_encryption_alg"]
except KeyError:
return msg
try:
encenc = kwargs["request_object_encryption_enc"]
except KeyError:
try:
encenc = self.behaviour["request_object_encryption_enc"]
except KeyError:
raise MissingRequiredAttribute(
"No request_object_encryption_enc specified"
)
_jwe = JWE(msg, alg=encalg, enc=encenc)
_kty = jwe.alg2keytype(encalg)
try:
_kid = kwargs["enc_kid"]
except KeyError:
_kid = ""
if "target" not in kwargs:
raise MissingRequiredAttribute("No target specified")
if _kid:
_keys = self.keyjar.get_encrypt_key(_kty, owner=kwargs["target"], kid=_kid)
_jwe["kid"] = _kid
else:
_keys = self.keyjar.get_encrypt_key(_kty, owner=kwargs["target"])
return _jwe.encrypt(_keys)
@staticmethod
def construct_redirect_uri(**kwargs):
_filedir = kwargs["local_dir"]
if not os.path.isdir(_filedir):
os.makedirs(_filedir)
_webpath = kwargs["base_path"]
_name = rndstr(10) + ".jwt"
filename = os.path.join(_filedir, _name)
while os.path.exists(filename):
_name = rndstr(10)
filename = os.path.join(_filedir, _name)
_webname = "%s%s" % (_webpath, _name)
return filename, _webname
def filename_from_webname(self, webname):
_filedir = self.requests_dir
if not os.path.isdir(_filedir):
os.makedirs(_filedir)
if webname.startswith(self.base_url):
return webname[len(self.base_url) :]
else:
raise ValueError("Invalid webname, must start with base_url")
    def construct_AuthorizationRequest(
        self, request=None, request_args=None, extra_args=None, **kwargs
    ):
        """Build an authorization request, optionally packing it into a signed
        (and possibly encrypted) request object passed by value ('request') or
        by reference ('request_uri').
        """
        if request_args is not None:
            if "nonce" not in request_args:
                _rt = request_args["response_type"]
                # A nonce is required for implicit/hybrid flows.
                if "token" in _rt or "id_token" in _rt:
                    request_args["nonce"] = rndstr(32)
        elif "response_type" in kwargs:
            if "token" in kwargs["response_type"]:
                request_args = {"nonce": rndstr(32)}
        else:  # Never wrong to specify a nonce
            request_args = {"nonce": rndstr(32)}

        request_param = kwargs.get("request_param")
        if "request_method" in kwargs:
            # 'file' -> pass by reference, anything else -> pass by value.
            if kwargs["request_method"] == "file":
                request_param = "request_uri"
            else:
                request_param = "request"
            del kwargs["request_method"]

        areq = super().construct_AuthorizationRequest(
            request=request, request_args=request_args, extra_args=extra_args, **kwargs
        )

        if request_param:
            alg = None
            for arg in ["request_object_signing_alg", "algorithm"]:
                try:  # Trumps everything
                    alg = kwargs[arg]
                except KeyError:
                    pass
                else:
                    break

            if not alg:
                try:
                    alg = self.behaviour["request_object_signing_alg"]
                except KeyError:
                    alg = "none"

            kwargs["request_object_signing_alg"] = alg

            if "keys" not in kwargs and alg and alg != "none":
                _kty = jws.alg2keytype(alg)
                try:
                    _kid = kwargs["sig_kid"]
                except KeyError:
                    _kid = self.kid["sig"].get(_kty, None)

                kwargs["keys"] = self.keyjar.get_signing_key(_kty, kid=_kid)

            _req = make_openid_request(areq, **kwargs)

            # Should the request be encrypted
            _req = self.request_object_encryption(_req, **kwargs)

            if request_param == "request":
                areq["request"] = _req
            else:
                # Pass by reference: write the request object to a file that
                # is served under one of the registered request_uris.
                try:
                    _webname = self.registration_response["request_uris"][0]
                    filename = self.filename_from_webname(_webname)
                except KeyError:
                    filename, _webname = self.construct_redirect_uri(**kwargs)

                with open(filename, mode="w") as fid:
                    fid.write(_req)
                areq["request_uri"] = _webname

        return areq
def construct_UserInfoRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("userinfo_endpoint")
if request_args is None:
request_args = {}
if "access_token" in request_args:
pass
else:
if "scope" not in kwargs:
kwargs["scope"] = "openid"
token = self.get_token(**kwargs)
if token is None:
raise MissingParameter("No valid token available")
request_args["access_token"] = token.access_token
return self.construct_request(request, request_args, extra_args)
def construct_RegistrationRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("registration_endpoint")
return self.construct_request(request, request_args, extra_args)
def construct_RefreshSessionRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("refreshsession_endpoint")
return self.construct_request(request, request_args, extra_args)
def _id_token_based(self, request, request_args=None, extra_args=None, **kwargs):
if request_args is None:
request_args = {}
try:
_prop = kwargs["prop"]
except KeyError:
_prop = "id_token"
if _prop in request_args:
pass
else:
raw_id_token = self._get_id_token(**kwargs)
if raw_id_token is None:
raise MissingParameter("No valid id token available")
request_args[_prop] = raw_id_token
return self.construct_request(request, request_args, extra_args)
def construct_CheckSessionRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("checksession_endpoint")
return self._id_token_based(request, request_args, extra_args, **kwargs)
def construct_CheckIDRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("checkid_endpoint")
# access_token is where the id_token will be placed
return self._id_token_based(
request, request_args, extra_args, prop="access_token", **kwargs
)
def construct_EndSessionRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("endsession_endpoint")
if request_args is None:
request_args = {}
if "state" in request_args and "state" not in kwargs:
kwargs["state"] = request_args["state"]
return self._id_token_based(request, request_args, extra_args, **kwargs)
def do_authorization_request(
self,
state="",
body_type="",
method="GET",
request_args=None,
extra_args=None,
http_args=None,
**kwargs,
):
algs = self.sign_enc_algs("id_token")
if "code_challenge" in self.config:
_args, code_verifier = self.add_code_challenge()
request_args.update(_args)
return super().do_authorization_request(
state=state,
body_type=body_type,
method=method,
request_args=request_args,
extra_args=extra_args,
http_args=http_args,
algs=algs,
)
    def do_access_token_request(
        self,
        scope="",
        state="",
        body_type="json",
        method="POST",
        request_args=None,
        extra_args=None,
        http_args=None,
        authn_method="client_secret_basic",
        **kwargs,
    ):
        """Send an access token request and, when the response carries an ID
        token, check its nonce against the one stored for *state*.

        :raises ParameterError: when the nonces differ
        """
        atr = super().do_access_token_request(
            scope=scope,
            state=state,
            body_type=body_type,
            method=method,
            request_args=request_args,
            extra_args=extra_args,
            http_args=http_args,
            authn_method=authn_method,
            **kwargs,
        )

        try:
            _idt = atr["id_token"]
        except KeyError:
            pass
        else:
            try:
                if self.state2nonce[state] != _idt["nonce"]:
                    raise ParameterError('Someone has messed with "nonce"')
            except KeyError:
                # NOTE(review): a missing stored nonce or a nonce-less ID token
                # silently skips the check -- confirm this leniency is intended.
                pass

        return atr
def do_registration_request(
self,
scope="",
state="",
body_type="json",
method="POST",
request_args=None,
extra_args=None,
http_args=None,
):
request = self.message_factory.get_request_type("registration_endpoint")
url, body, ht_args, csi = self.request_info(
request,
method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope,
state=state,
)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
response_cls = self.message_factory.get_response_type("registration_endpoint")
response = self.request_and_return(
url, response_cls, method, body, body_type, state=state, http_args=http_args
)
return response
def do_check_session_request(
self,
scope="",
state="",
body_type="json",
method="GET",
request_args=None,
extra_args=None,
http_args=None,
):
request = self.message_factory.get_request_type("checksession_endpoint")
response_cls = self.message_factory.get_response_type("checksession_endpoint")
url, body, ht_args, csi = self.request_info(
request,
method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope,
state=state,
)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
return self.request_and_return(
url, response_cls, method, body, body_type, state=state, http_args=http_args
)
def do_check_id_request(
self,
scope="",
state="",
body_type="json",
method="GET",
request_args=None,
extra_args=None,
http_args=None,
):
request = self.message_factory.get_request_type("checkid_endpoint")
response_cls = self.message_factory.get_response_type("checkid_endpoint")
url, body, ht_args, csi = self.request_info(
request,
method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope,
state=state,
)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
return self.request_and_return(
url, response_cls, method, body, body_type, state=state, http_args=http_args
)
def do_end_session_request(
self,
scope="",
state="",
body_type="",
method="GET",
request_args=None,
extra_args=None,
http_args=None,
):
request = self.message_factory.get_request_type("endsession_endpoint")
response_cls = self.message_factory.get_response_type("endsession_endpoint")
url, body, ht_args, _ = self.request_info(
request,
method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope,
state=state,
)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
return self.request_and_return(
url, response_cls, method, body, body_type, state=state, http_args=http_args
)
    def user_info_request(self, method="GET", state="", scope="", **kwargs):
        """Prepare path, body, method and HTTP arguments for a userinfo request.

        The access token can come from ``kwargs['token']``,
        ``kwargs['access_token']`` or the grant stored under *state* (in that
        order of precedence); an expired stored token triggers a refresh.

        :return: (path, body, method, http_args) tuple
        :raises AccessDenied: when no token exists for *state*
        :raises MissingParameter: when the token type cannot be determined
        """
        uir = self.message_factory.get_request_type("userinfo_endpoint")()
        logger.debug("[user_info_request]: kwargs:%s" % (sanitize(kwargs),))
        token: Optional[Token] = None
        if "token" in kwargs:
            if kwargs["token"]:
                uir["access_token"] = kwargs["token"]
                # Wrap the raw token so the header logic below can use it.
                token = Token()
                token.token_type = "Bearer"
                token.access_token = kwargs["token"]
                kwargs["behavior"] = "use_authorization_header"
            else:
                # What to do ? Need a callback
                pass
        elif "access_token" in kwargs and kwargs["access_token"]:
            uir["access_token"] = kwargs["access_token"]
            del kwargs["access_token"]
        elif state:
            token = self.grant[state].get_token(scope)

            if token is None:
                raise AccessDenied("invalid_token")

            if token.is_valid():
                uir["access_token"] = token.access_token
                if (
                    token.token_type
                    and token.token_type.lower() == "bearer"
                    and method == "GET"
                ):
                    kwargs["behavior"] = "use_authorization_header"
            else:
                # raise oauth2.OldAccessToken
                if self.log:
                    self.log.info("do access token refresh")
                try:
                    self.do_access_token_refresh(token=token, state=state)
                    token = cast(Token, self.grant[state].get_token(scope))
                    uir["access_token"] = token.access_token
                except Exception:
                    raise

        uri = self._endpoint("userinfo_endpoint", **kwargs)
        # If access token is a bearer token it might be sent in the
        # authorization header
        # 4 ways of sending the access_token:
        # - POST with token in authorization header
        # - POST with token in message body
        # - GET with token in authorization header
        # - GET with token as query parameter
        if "behavior" in kwargs:
            _behav = kwargs["behavior"]
            _token = uir["access_token"]
            _ttype = ""
            try:
                _ttype = kwargs["token_type"]
            except KeyError:
                if token:
                    try:
                        _ttype = cast(str, token.token_type)
                    except AttributeError:
                        raise MissingParameter("Unspecified token type")

            if "as_query_parameter" == _behav:
                method = "GET"
            elif token:
                # use_authorization_header, token_in_message_body
                if "use_authorization_header" in _behav:
                    token_header = "{type} {token}".format(
                        type=_ttype.capitalize(), token=_token
                    )
                    if "headers" in kwargs:
                        kwargs["headers"].update({"Authorization": token_header})
                    else:
                        kwargs["headers"] = {"Authorization": token_header}
                if "token_in_message_body" not in _behav:
                    # remove the token from the request
                    del uir["access_token"]

        path, body, kwargs = get_or_post(uri, method, uir, **kwargs)

        h_args = dict([(k, v) for k, v in kwargs.items() if k in HTTP_ARGS])

        return path, body, method, h_args
    def do_user_info_request(
        self, method="POST", state="", scope="openid", request="openid", **kwargs
    ):
        """Fetch and parse user info from the userinfo endpoint.

        Handles both JSON and signed-JWT responses, converts error payloads
        into :class:`UserInfoErrorResponse`, and checks the 'sub' claim
        against the ID token stored under *state*.

        :raises PyoidcError: on unexpected content types or server errors
        :raises CommunicationError: on HTTP 405
        :raises RequestError: on an unparseable 4xx body
        :raises SubMismatch: when userinfo 'sub' differs from the ID token's
        """
        kwargs["request"] = request
        path, body, method, h_args = self.user_info_request(
            method, state, scope, **kwargs
        )

        logger.debug(
            "[do_user_info_request] PATH:%s BODY:%s H_ARGS: %s"
            % (sanitize(path), sanitize(body), sanitize(h_args))
        )

        if self.events:
            self.events.store("Request", {"body": body})
            self.events.store("request_url", path)
            self.events.store("request_http_args", h_args)

        try:
            resp = self.http_request(path, method, data=body, **h_args)
        except oauth2.exception.MissingRequiredAttribute:
            raise

        if resp.status_code == 200:
            # Pick the deserialization format from the content type.
            if "application/json" in resp.headers["content-type"]:
                sformat = "json"
            elif "application/jwt" in resp.headers["content-type"]:
                sformat = "jwt"
            else:
                raise PyoidcError(
                    "ERROR: Unexpected content-type: %s" % resp.headers["content-type"]
                )
        elif resp.status_code == 500:
            raise PyoidcError("ERROR: Something went wrong: %s" % resp.text)
        elif resp.status_code == 405:
            # Method not allowed error
            allowed_methods = [x.strip() for x in resp.headers["allow"].split(",")]
            raise CommunicationError(
                "Server responded with HTTP Error Code 405", "", allowed_methods
            )
        elif 400 <= resp.status_code < 500:
            # the response text might be a OIDC message
            try:
                res = ErrorResponse().from_json(resp.text)
            except Exception:
                raise RequestError(resp.text)
            else:
                self.store_response(res, resp.text)
                return res
        else:
            raise PyoidcError(
                "ERROR: Something went wrong [%s]: %s" % (resp.status_code, resp.text)
            )

        try:
            _schema = kwargs["user_info_schema"]
        except KeyError:
            _schema = OpenIDSchema

        logger.debug("Reponse text: '%s'" % sanitize(resp.text))

        _txt = resp.text
        if sformat == "json":
            res = _schema().from_json(txt=_txt)
        else:
            # Signed JWT response; verification may be disabled by the caller.
            verify = kwargs.get("verify", True)
            res = _schema().from_jwt(
                _txt,
                keyjar=self.keyjar,
                sender=self.provider_info["issuer"],
                verify=verify,
            )
        if "error" in res:  # Error response
            res = UserInfoErrorResponse(**res.to_dict())

        if state:
            # Verify userinfo sub claim against what's returned in the ID Token
            idt = self.grant[state].get_id_token()
            if idt:
                if idt["sub"] != res["sub"]:
                    raise SubMismatch(
                        "Sub identifier not the same in userinfo and Id Token"
                    )

        self.store_response(res, _txt)

        return res
def get_userinfo_claims(
self, access_token, endpoint, method="POST", schema_class=OpenIDSchema, **kwargs
):
uir = UserInfoRequest(access_token=access_token)
h_args = dict([(k, v) for k, v in kwargs.items() if k in HTTP_ARGS])
if "authn_method" in kwargs:
http_args = self.init_authentication_method(**kwargs)
else:
# If nothing defined this is the default
http_args = self.init_authentication_method(uir, "bearer_header", **kwargs)
h_args.update(http_args)
path, body, kwargs = get_or_post(endpoint, method, uir, **kwargs)
try:
resp = self.http_request(path, method, data=body, **h_args)
except MissingRequiredAttribute:
raise
if resp.status_code == 200:
# FIXME: Could this also encounter application/jwt for encrypted userinfo
# the do_userinfo_request method already handles it
if "application/json" not in resp.headers["content-type"]:
raise PyoidcError(
"ERROR: content-type in response unexpected: %s"
% resp.headers["content-type"]
)
elif resp.status_code == 500:
raise PyoidcError("ERROR: Something went wrong: %s" % resp.text)
else:
raise PyoidcError(
"ERROR: Something went wrong [%s]: %s" % (resp.status_code, resp.text)
)
res = schema_class().from_json(txt=resp.text)
self.store_response(res, resp.text)
return res
def unpack_aggregated_claims(self, userinfo):
if userinfo["_claim_sources"]:
for csrc, spec in userinfo["_claim_sources"].items():
if "JWT" in spec:
aggregated_claims = Message().from_jwt(
spec["JWT"].encode("utf-8"), keyjar=self.keyjar, sender=csrc
)
claims = [
value
for value, src in userinfo["_claim_names"].items()
if src == csrc
]
if set(claims) != set(list(aggregated_claims.keys())):
logger.warning(
"Claims from claim source doesn't match what's in "
"the userinfo"
)
for key, vals in aggregated_claims.items():
userinfo[key] = vals
return userinfo
    def fetch_distributed_claims(self, userinfo, callback=None):
        """Fetch claims from the distributed claim sources referenced in
        *userinfo* and merge them into it.

        :param userinfo: user info message with ``_claim_sources`` /
            ``_claim_names`` bookkeeping
        :param callback: optional function mapping an endpoint URL to an
            access token to use against it
        :return: the updated userinfo (bookkeeping entries removed)
        """
        for csrc, spec in userinfo["_claim_sources"].items():
            if "endpoint" in spec:
                if not spec["endpoint"].startswith("https://"):
                    logger.warning(
                        "Fetching distributed claims from an untrusted source: %s",
                        spec["endpoint"],
                    )
                # NOTE(review): all three calls below pass verify=False, so a
                # JWT returned by the (remote) claim source is accepted without
                # signature verification -- confirm this is intended.
                if "access_token" in spec:
                    _uinfo = self.do_user_info_request(
                        method="GET",
                        token=spec["access_token"],
                        userinfo_endpoint=spec["endpoint"],
                        verify=False,
                    )
                else:
                    if callback:
                        _uinfo = self.do_user_info_request(
                            method="GET",
                            token=callback(spec["endpoint"]),
                            userinfo_endpoint=spec["endpoint"],
                            verify=False,
                        )
                    else:
                        _uinfo = self.do_user_info_request(
                            method="GET",
                            userinfo_endpoint=spec["endpoint"],
                            verify=False,
                        )

                # Claim names attributed to this source in _claim_names.
                claims = [
                    value
                    for value, src in userinfo["_claim_names"].items()
                    if src == csrc
                ]

                if set(claims) != set(list(_uinfo.keys())):
                    logger.warning(
                        "Claims from claim source doesn't match what's in "
                        "the userinfo"
                    )

                for key, vals in _uinfo.items():
                    userinfo[key] = vals

        # Remove the `_claim_sources` and `_claim_names` from userinfo and better be safe than sorry
        if "_claim_sources" in userinfo:
            del userinfo["_claim_sources"]
        if "_claim_names" in userinfo:
            del userinfo["_claim_names"]

        return userinfo
def verify_alg_support(self, alg, usage, other):
"""
Verify that the algorithm to be used are supported by the other side.
:param alg: The algorithm specification
:param usage: In which context the 'alg' will be used.
The following values are supported:
- userinfo
- id_token
- request_object
- token_endpoint_auth
:param other: The identifier for the other side
:return: True or False
"""
try:
_pcr = self.provider_info
supported = _pcr["%s_algs_supported" % usage]
except KeyError:
try:
supported = getattr(self, "%s_algs_supported" % usage)
except AttributeError:
supported = None
if supported is None:
return True
else:
if alg in supported:
return True
else:
return False
    def match_preferences(self, pcr=None, issuer=None):
        """
        Match the clients preferences against what the provider can do.

        :param pcr: Provider configuration response if available
        :param issuer: The issuer identifier
        """
        if not pcr:
            pcr = self.provider_info

        regreq = self.message_factory.get_request_type("registration_endpoint")

        for _pref, _prov in PREFERENCE2PROVIDER.items():
            try:
                vals = self.client_prefs[_pref]
            except KeyError:
                # Client has no preference for this parameter.
                continue

            try:
                _pvals = pcr[_prov]
            except KeyError:
                # The provider is silent about this parameter: fall back to
                # the spec default, or empty list/None depending on the
                # parameter's declared type.
                try:
                    self.behaviour[_pref] = PROVIDER_DEFAULT[_pref]
                except KeyError:
                    if isinstance(pcr.c_param[_prov][0], list):
                        self.behaviour[_pref] = []
                    else:
                        self.behaviour[_pref] = None
                continue

            if isinstance(vals, str):
                # Single preferred value; accept it only if supported.
                if vals in _pvals:
                    self.behaviour[_pref] = vals
            else:
                vtyp = regreq.c_param[_pref]

                if isinstance(vtyp[0], list):
                    # List-valued parameter: keep every supported value.
                    self.behaviour[_pref] = []
                    for val in vals:
                        if val in _pvals:
                            self.behaviour[_pref].append(val)
                else:
                    # Scalar parameter: pick the first supported value.
                    for val in vals:
                        if val in _pvals:
                            self.behaviour[_pref] = val
                            break

            if _pref not in self.behaviour:
                raise ConfigurationError("OP couldn't match preference:%s" % _pref, pcr)

        # Carry over preferences that have no provider-side counterpart.
        for key, val in self.client_prefs.items():
            if key in self.behaviour:
                continue

            try:
                vtyp = regreq.c_param[key]
                if isinstance(vtyp[0], list):
                    pass
                elif isinstance(val, list) and not isinstance(val, str):
                    # Scalar parameter given as a list: take the first entry.
                    val = val[0]
            except KeyError:
                pass
            if key not in PREFERENCE2PROVIDER:
                self.behaviour[key] = val
def store_registration_info(self, reginfo):
self.registration_response = reginfo
if "token_endpoint_auth_method" not in self.registration_response:
self.registration_response[
"token_endpoint_auth_method" # nosec
] = "client_secret_basic"
self.client_id = reginfo["client_id"]
try:
self.client_secret = reginfo["client_secret"]
except KeyError: # Not required
pass
else:
try:
self.registration_expires = reginfo["client_secret_expires_at"]
except KeyError:
pass
try:
self.registration_access_token = reginfo["registration_access_token"]
except KeyError:
pass
def handle_registration_info(self, response):
    """Process an HTTP response from the registration endpoint.

    Successful (200/201) responses are parsed as registration responses
    and stored on the client; 4xx responses are parsed as ErrorResponse
    and re-raised; anything else fails outright.

    :param response: HTTP response object (``requests`` style).
    :return: The parsed registration response message.
    :raises RegistrationError: on any error or unparsable response.
    """
    err_msg = "Got error response: {}"
    unk_msg = "Unknown response: {}"
    if response.status_code in [200, 201]:
        resp = self.message_factory.get_response_type(
            "registration_endpoint"
        )().deserialize(response.text, "json")
        # Some implementations sends back a 200 with an error message inside
        try:
            resp.verify()
        except oauth2.message.MissingRequiredAttribute as err:
            logger.error(err)
            raise RegistrationError(err)
        except Exception:
            # Not a valid registration response; try reading it as an
            # OAuth2 error message instead.
            resp = ErrorResponse().deserialize(response.text, "json")
            if resp.verify():
                logger.error(err_msg.format(sanitize(resp.to_json())))
                if self.events:
                    self.events.store("protocol response", resp)
                raise RegistrationError(resp.to_dict())
            else:  # Something else
                logger.error(unk_msg.format(sanitize(response.text)))
                raise RegistrationError(response.text)
        else:
            # got a proper registration response
            self.store_response(resp, response.text)
            self.store_registration_info(resp)
    elif 400 <= response.status_code <= 499:
        # Client error: expect a JSON-encoded OAuth2 error response.
        try:
            resp = ErrorResponse().deserialize(response.text, "json")
        except JSONDecodeError:
            logger.error(unk_msg.format(sanitize(response.text)))
            raise RegistrationError(response.text)
        if resp.verify():
            logger.error(err_msg.format(sanitize(resp.to_json())))
            if self.events:
                self.events.store("protocol response", resp)
            raise RegistrationError(resp.to_dict())
        else:  # Something else
            logger.error(unk_msg.format(sanitize(response.text)))
            raise RegistrationError(response.text)
    else:
        # Any other status class (e.g. 5xx) is treated as a failure.
        raise RegistrationError(response.text)
    return resp
def registration_read(self, url="", registration_access_token=None):
    """Read the client registration info from the given url.

    :param url: Registration client URI; defaults to the one received in
        the registration response.
    :param registration_access_token: Bearer token for the request;
        defaults to the stored registration access token.
    :raises RegistrationError: If an error happened
    :return: RegistrationResponse
    """
    target = url or self.registration_response["registration_client_uri"]
    token = registration_access_token or self.registration_access_token
    response = self.http_request(
        target, "GET", headers={"Authorization": "Bearer %s" % token}
    )
    return self.handle_registration_info(response)
def generate_request_uris(self, request_dir):
    """Build a request_uri path unique to this RP/OP combination.

    The trailing path component is a SHA-256 digest over the OP issuer
    identifier and this client's base URL, so different OPs get distinct
    paths under the same request directory.

    :param request_dir: Directory part appended to the base URL.
    :return: The request URI (a single string, despite the plural name).
    """
    digest = hashlib.sha256()
    for part in (self.provider_info["issuer"], self.base_url):
        digest.update(as_bytes(part))
    return "{}{}/{}".format(self.base_url, request_dir, digest.hexdigest())
def create_registration_request(self, **kwargs):
    """
    Create a registration request.

    Request parameters are taken from *kwargs* first and then from the
    client's negotiated behaviour.  ``redirect_uris`` is mandatory;
    ``request_uris`` is generated on demand when the provider requires
    request_uri registration.

    :param kwargs: parameters to the registration request
    :return: A registration request message instance.
    :raises MissingRequiredAttribute: if no redirect_uris can be found.
    """
    req = self.message_factory.get_request_type("registration_endpoint")()
    for prop in req.parameters():
        try:
            req[prop] = kwargs[prop]
        except KeyError:
            try:
                req[prop] = self.behaviour[prop]
            except KeyError:
                pass
    if "post_logout_redirect_uris" not in req:
        try:
            req["post_logout_redirect_uris"] = self.post_logout_redirect_uris
        except AttributeError:
            pass
    if "redirect_uris" not in req:
        try:
            req["redirect_uris"] = self.redirect_uris
        except AttributeError:
            raise MissingRequiredAttribute("redirect_uris", req)
    try:
        if self.provider_info["require_request_uri_registration"] is True:
            req["request_uris"] = self.generate_request_uris(self.requests_dir)
    except KeyError:
        pass
    if "response_types" in req:
        # Keep grant_types consistent with the requested response_types.
        req["grant_types"] = response_types_to_grant_types(
            req["response_types"], **kwargs
        )
    return req
def register(self, url, registration_token=None, **kwargs):
    """
    Register the client at an OP.

    :param url: The OPs registration endpoint
    :param registration_token: Initial Access Token for registration endpoint
    :param kwargs: parameters to the registration request
    :return: The processed registration response.
    """
    req = self.create_registration_request(**kwargs)
    logger.debug("[registration_request]: kwargs:%s" % (sanitize(kwargs),))
    if self.events:
        self.events.store("Protocol request", req)
    headers = {"content-type": "application/json"}
    if registration_token is not None:
        try:
            # unpack() is only used to check that the token parses as a
            # JWT; a well-formed JWT is sent as-is.
            token = jwt.JWT()
            token.unpack(registration_token)
        except BadSyntax:
            # no JWT
            # Anything else is base64-encoded before being sent.
            registration_token = b64encode(registration_token.encode()).decode()
        finally:
            headers["Authorization"] = "Bearer " + registration_token
    rsp = self.http_request(url, "POST", data=req.to_json(), headers=headers)
    return self.handle_registration_info(rsp)
def normalization(self, principal, idtype="mail"):
    """Normalize a user identifier for discovery.

    :param principal: The user identifier (e-mail address or URL).
    :param idtype: "mail" turns the identifier into an ``acct:`` URI,
        "url" extracts the netloc; anything else leaves the identifier
        untouched with an empty domain.
    :return: Tuple of (subject, domain).
    """
    subject = principal
    domain = ""
    if idtype == "mail":
        # Exactly one "@" is expected; the part after it is the domain.
        _, domain = principal.split("@")
        subject = "acct:%s" % principal
    elif idtype == "url":
        domain = urlparse(principal).netloc
    return subject, domain
def discover(self, principal, host=None):
    """Delegate discovery of *principal* to ``self.wf.discovery_query``.

    :param principal: User identifier to discover the issuer for.
    :param host: Optional host to direct the query to.
    :return: Whatever the discovery query returns.
    """
    return self.wf.discovery_query(principal, host=host)
def sign_enc_algs(self, typ):
    """Collect signing/encryption algorithm settings for *typ*.

    Each PARAMMAP entry is looked up in the registration response.  When
    a value is missing (or there is no registration response at all),
    only the "sign" entry falls back to the default ID Token signing
    algorithm.
    """
    algs = {}
    for name, template in PARAMMAP.items():
        try:
            algs[name] = self.registration_response[template % typ]
        except (TypeError, KeyError):
            # TypeError covers a missing/None registration_response.
            if name == "sign":
                algs[name] = DEF_SIGN_ALG["id_token"]
    return algs
def _verify_id_token(
    self, id_token, nonce="", acr_values=None, auth_time=0, max_age=0
):
    """
    Verify IdToken.

    If the JWT alg Header Parameter uses a MAC based algorithm such as
    HS256, HS384, or HS512, the octets of the UTF-8 representation of the
    client_secret corresponding to the client_id contained in the aud
    (audience) Claim are used as the key to validate the signature. For MAC
    based algorithms, the behavior is unspecified if the aud is
    multi-valued or if an azp value is present that is different than the
    aud value.

    :param id_token: The ID Token tp check
    :param nonce: The nonce specified in the authorization request
    :param acr_values: Asked for acr values
    :param auth_time: An auth_time claim
    :param max_age: Max age of authentication
    :raises OtherError: on issuer/audience/expiry/nonce/acr mismatch.
    :raises AuthnToOld: when the end-user authentication is too old.
    """
    # The token must come from the provider we are configured against.
    if self.provider_info["issuer"] != id_token["iss"]:
        raise OtherError("issuer != iss")
    # We must be (one of) the intended audience(s) ...
    if self.client_id not in id_token["aud"]:
        raise OtherError("not intended for me")
    # ... and for multi-audience tokens azp must name us explicitly.
    if len(id_token["aud"]) > 1:
        if "azp" not in id_token or id_token["azp"] != self.client_id:
            raise OtherError("not intended for me")
    _now = time_util.utc_time_sans_frac()
    if _now > id_token["exp"]:
        raise OtherError("Passed best before date")
    # Optional client-side cap on how old an issued token may be.
    if (
        self.id_token_max_age
        and _now > int(id_token["iat"]) + self.id_token_max_age
    ):
        raise OtherError("I think this ID token is to old")
    # Replay protection: the request nonce must round-trip unchanged.
    if nonce and nonce != id_token["nonce"]:
        raise OtherError("nonce mismatch")
    if acr_values and id_token["acr"] not in acr_values:
        raise OtherError("acr mismatch")
    # max_age bounds how long ago the end-user may have authenticated.
    if max_age and _now > int(id_token["auth_time"] + max_age):
        raise AuthnToOld("To old authentication")
    if auth_time:
        if not claims_match(id_token["auth_time"], {"auth_time": auth_time}):
            raise AuthnToOld("To old authentication")
def verify_id_token(self, id_token, authn_req):
    """Verify an ID Token against the authentication request it answers.

    The nonce, acr_values and max_age parameters of the original request
    (when present) are forwarded to :meth:`_verify_id_token`.
    """
    kwa = {}
    for param in ("nonce", "acr_values", "max_age"):
        try:
            kwa[param] = authn_req[param]
        except KeyError:
            continue
    self._verify_id_token(id_token, **kwa)
class Server(oauth2.Server):
    """OIC Server class."""

    def __init__(
        self,
        verify_ssl: bool = None,
        keyjar: KeyJar = None,
        client_cert: Union[str, Tuple[str, str]] = None,
        timeout: float = None,
        message_factory: Type[MessageFactory] = OIDCMessageFactory,
        settings: PyoidcSettings = None,
    ):
        """Initialize the server.

        ``verify_ssl``, ``client_cert`` and ``timeout`` are deprecated:
        passing any of them emits a DeprecationWarning and copies the
        value onto ``settings`` instead.
        """
        self.settings = settings or OicServerSettings()
        if verify_ssl is not None:
            warnings.warn(
                "`verify_ssl` is deprecated, please use `settings` instead if you need to set a non-default value.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.settings.verify_ssl = verify_ssl
        if client_cert is not None:
            warnings.warn(
                "`client_cert` is deprecated, please use `settings` instead if you need to set a non-default value.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.settings.client_cert = client_cert
        if timeout is not None:
            warnings.warn(
                "`timeout` is deprecated, please use `settings` instead if you need to set a non-default value.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.settings.timeout = timeout
        super().__init__(
            keyjar=keyjar,
            message_factory=message_factory,
            settings=self.settings,
        )
@staticmethod
def _parse_urlencoded(url=None, query=None):
if url:
parts = urlparse(url)
scheme, netloc, path, params, query, fragment = parts[:6]
return parse_qs(query)
def handle_request_uri(self, request_uri, verify=True, sender=""):
    """
    Handle request URI.

    :param request_uri: URL pointing to where the signed request should be fetched from.
    :param verify: Whether the signature on the request should be verified.
        Don't use anything but the default unless you REALLY know what you're doing
    :param sender: The issuer of the request JWT.
    :return: The parsed request, or an authorization error response when
        the URI could not be fetched.
    :raises AuthzError: on HTTP errors or an unparsable request object.
    """
    # Do a HTTP get
    logger.debug("Get request from request_uri: {}".format(request_uri))
    try:
        http_req = self.http_request(request_uri)
    except ConnectionError:
        logger.error("Connection Error")
        return authz_error("invalid_request_uri")
    if not http_req:
        logger.error("Nothing returned")
        return authz_error("invalid_request_uri")
    elif http_req.status_code >= 400:
        logger.error("HTTP error {}:{}".format(http_req.status_code, http_req.text))
        raise AuthzError("invalid_request")
    # http_req.text is a signed JWT
    try:
        logger.debug("request txt: {}".format(http_req.text))
        req = self.parse_jwt_request(
            txt=http_req.text, verify=verify, sender=sender
        )
    except Exception as err:
        logger.error(
            "{}:{} encountered while parsing fetched request".format(
                err.__class__, err
            )
        )
        raise AuthzError("invalid_openid_request_object")
    logger.debug("Fetched request: {}".format(req))
    return req
def parse_authorization_request(
    self, request=AuthorizationRequest, url=None, query=None, keys=None
):
    """Parse an authorization request, resolving request/request_uri.

    The plain query parameters are parsed first; if they carry a
    ``request`` (inline JWT) or ``request_uri`` (remote JWT) parameter,
    that request object is fetched/parsed and merged with the query
    parameters (the request-object values win).  The combined request is
    verified last.

    :param request: Message class used for the request.
    :param url: Full request URL whose query part is used (alternative
        to *query*).
    :param query: The query part of the request, or a dict of parameters.
    :param keys: Unused here; kept for interface compatibility.
    :return: The verified request (or a Response carrying an error).
    """
    if url:
        parts = urlparse(url)
        scheme, netloc, path, params, query, fragment = parts[:6]
    if isinstance(query, dict):
        sformat = "dict"
    else:
        sformat = "urlencoded"
    # Verification is deferred until after any request object has been
    # merged in.
    _req = self._parse_request(request, query, sformat, verify=False)
    if self.events:
        self.events.store("Request", _req)
    _req_req: Union[Message, Dict[str, Any]] = {}
    try:
        _request = _req["request"]
    except KeyError:
        try:
            _url = _req["request_uri"]
        except KeyError:
            pass
        else:
            # The request object lives behind a URI; fetch it.
            _req_req = self.handle_request_uri(
                _url, verify=False, sender=_req["client_id"]
            )
    else:
        if isinstance(_request, Message):
            _req_req = _request
        else:
            try:
                _req_req = self.parse_jwt_request(
                    request, txt=_request, verify=False
                )
            except Exception:
                # Not a JWT after all; treat it as urlencoded data.
                _req_req = self._parse_request(
                    request, _request, "urlencoded", verify=False
                )
            else:  # remove JWT attributes
                for attr in JasonWebToken.c_param:
                    try:
                        del _req_req[attr]
                    except KeyError:
                        pass
    if isinstance(_req_req, Response):
        # handle_request_uri() produced an error response; pass it on.
        return _req_req
    if _req_req:
        if self.events:
            self.events.store("Signed Request", _req_req)
        # Merge: values from the signed request object take precedence.
        for key, val in _req.items():
            if key in ["request", "request_uri"]:
                continue
            if key not in _req_req:
                _req_req[key] = val
        _req = _req_req
        if self.events:
            self.events.store("Combined Request", _req)
    try:
        _req.verify(keyjar=self.keyjar)
    except Exception as err:
        if self.events:
            self.events.store("Exception", err)
        logger.error(err)
        raise
    return _req
def parse_jwt_request(
    self,
    request=AuthorizationRequest,
    txt="",
    keyjar=None,
    verify=True,
    sender="",
    **kwargs,
):
    """Overridden to use OIC Message type.

    Accepts the deprecated ``keys`` keyword as an alias for ``keyjar``.
    """
    if "keys" in kwargs:
        keyjar = kwargs["keys"]
        warnings.warn(
            "`keys` was renamed to `keyjar`, please update your code.",
            DeprecationWarning,
            stacklevel=2,
        )
    return super().parse_jwt_request(
        request=request, txt=txt, keyjar=keyjar, verify=verify, sender=sender
    )
def parse_check_session_request(self, url=None, query=None):
    """Parse a check-session request and return its deserialized id_token.

    NOTE(review): input validation uses ``assert`` (stripped under
    ``python -O``); marked # nosec, so presumably deliberate - confirm.
    """
    param = self._parse_urlencoded(url, query)
    assert "id_token" in param  # nosec, ignore the rest
    return deser_id_token(self, param["id_token"][0])
def parse_check_id_request(self, url=None, query=None):
    """Parse a check-id request and deserialize its access_token parameter.

    NOTE(review): input validation uses ``assert`` (stripped under
    ``python -O``); marked # nosec, so presumably deliberate - confirm.
    """
    param = self._parse_urlencoded(url, query)
    assert "access_token" in param  # nosec, ignore the rest
    return deser_id_token(self, param["access_token"][0])
def _parse_request(self, request_cls, data, sformat, client_id=None, verify=True):
    """Deserialize *data* into a *request_cls* instance and verify it.

    :param request_cls: Message class to instantiate.
    :param data: The raw request in the given serialization format.
    :param sformat: One of "json", "jwt", "urlencoded" or "dict".
    :param client_id: Sender whose keys are used for verification;
        defaults to the request's own client_id when present.
    :param verify: Whether to verify the parsed request.
    :raises ParseError: when *sformat* is not recognized.
    :return: The parsed (and possibly verified) request.
    """
    if sformat == "json":
        request = request_cls().from_json(data)
    elif sformat == "jwt":
        request = request_cls().from_jwt(data, keyjar=self.keyjar, sender=client_id)
    elif sformat == "urlencoded":
        # A full URL may be passed; only its query part is parsed.
        if "?" in data:
            parts = urlparse(data)
            scheme, netloc, path, params, query, fragment = parts[:6]
        else:
            query = data
        request = request_cls().from_urlencoded(query)
    elif sformat == "dict":
        request = request_cls(**data)
    else:
        raise ParseError(
            "Unknown package format: '{}'".format(sformat), request_cls
        )
    # get the verification keys
    if client_id:
        keys = self.keyjar.verify_keys(client_id)
        sender = client_id
    else:
        try:
            keys = self.keyjar.verify_keys(request["client_id"])
            sender = request["client_id"]
        except KeyError:
            # Anonymous request: let verify() use the keyjar directly.
            keys = None
            sender = ""
    logger.debug("Found {} verify keys".format(len(keys or "")))
    if verify:
        request.verify(key=keys, keyjar=self.keyjar, sender=sender)
    return request
def parse_open_id_request(self, data, sformat="urlencoded", client_id=None):
    """Parse an OpenIDRequest from *data* in the given format."""
    return self._parse_request(OpenIDRequest, data, sformat, client_id)
def parse_user_info_request(self, data, sformat="urlencoded"):
    """Parse a UserInfoRequest (same behaviour as parse_userinfo_request)."""
    return self._parse_request(UserInfoRequest, data, sformat)
def parse_userinfo_request(self, data, sformat="urlencoded"):
    """Parse a UserInfoRequest (same behaviour as parse_user_info_request)."""
    return self._parse_request(UserInfoRequest, data, sformat)
def parse_refresh_session_request(self, url=None, query=None):
    """Parse a RefreshSessionRequest from *url*'s query part or *query*."""
    if url:
        query = urlparse(url).query
    return RefreshSessionRequest().from_urlencoded(query)
def parse_registration_request(self, data, sformat="urlencoded"):
    """Parse a RegistrationRequest from *data* in the given format."""
    return self._parse_request(RegistrationRequest, data, sformat)
def parse_end_session_request(self, query, sformat="urlencoded"):
    """Parse an EndSessionRequest, deserializing the embedded id_token."""
    esr = self._parse_request(EndSessionRequest, query, sformat)
    # if there is a id_token in there it is as a string
    esr["id_token"] = deser_id_token(self, esr["id_token"])
    return esr
@staticmethod
def update_claims(session, where, about, old_claims=None):
    """
    Update claims dictionary.

    :param session: Session information
    :param where: Which request ("oidreq" or "authzreq")
    :param about: userinfo or id_token
    :param old_claims: Claims gathered so far; these are merged in but
        never overwritten.
    :return: claims or None
    """
    if old_claims is None:
        old_claims = {}
    req = None
    if where == "oidreq":
        try:
            req = OpenIDRequest().deserialize(session[where], "json")
        except KeyError:
            pass
    else:  # where == "authzreq"
        try:
            req = AuthorizationRequest().deserialize(session[where], "json")
        except KeyError:
            pass
    if req:
        logger.debug("%s: %s" % (where, sanitize(req.to_dict())))
        try:
            _claims = req["claims"][about]
            if _claims:
                # update with old claims, do not overwrite
                for key, val in old_claims.items():
                    if key not in _claims:
                        _claims[key] = val
                return _claims
        except KeyError:
            pass
    return old_claims
def id_token_claims(self, session):
    """
    Pick the IdToken claims from the request.

    Claims from the authorization request are gathered first and then
    merged (without overwriting) with those of the OpenID request.

    :param session: Session information
    :return: The IdToken claims
    """
    claims: Dict[str, str] = {}
    for where in ("authzreq", "oidreq"):
        claims = self.update_claims(session, where, "id_token", claims)
    return claims
def make_id_token(
    self,
    session,
    loa="2",
    issuer="",
    alg="RS256",
    code=None,
    access_token=None,
    user_info=None,
    auth_time=0,
    exp=None,
    extra_claims=None,
):
    """
    Create ID Token.

    :param session: Session information
    :param loa: Level of Assurance/Authentication context
    :param issuer: My identifier
    :param alg: Which signing algorithm to use for the IdToken
    :param code: Access grant
    :param access_token: Access Token
    :param user_info: If user info are to be part of the IdToken
    :param auth_time: Time of end-user authentication
    :param exp: Expiry offset mapping (e.g. {"days": 1}); defaults to one day
    :param extra_claims: Extra claims merged into the token
    :return: IDToken instance
    """
    # defaults
    if exp is None:
        inawhile = {"days": 1}
    else:
        inawhile = exp
    # Handle the idtoken_claims
    extra = {}
    itc = self.id_token_claims(session)
    if itc.keys():
        try:
            # A requested max_age shortens the token lifetime accordingly.
            inawhile = {"seconds": itc["max_age"]}
        except KeyError:
            pass
        for key, val in itc.items():
            if key == "auth_time":
                extra["auth_time"] = auth_time
            elif key == "acr":
                extra["acr"] = verify_acr_level(val, loa)
    else:
        if auth_time:
            extra["auth_time"] = auth_time
        if loa:
            extra["acr"] = loa
    if not user_info:
        _args: Dict[str, str] = {}
    else:
        try:
            _args = user_info.to_dict()
        except AttributeError:
            _args = user_info
    # Make sure that there are no name clashes
    for key in ["iss", "sub", "aud", "exp", "acr", "nonce", "auth_time"]:
        try:
            del _args[key]
        except KeyError:
            pass
    # Matching HMAC size used for the left-half hashes below.
    halg = "HS%s" % alg[-3:]
    if extra_claims is not None:
        _args.update(extra_claims)
    # c_hash/at_hash are the left halves of the hash of code/access token.
    if code:
        _args["c_hash"] = jws.left_hash(code.encode("utf-8"), halg)
    if access_token:
        _args["at_hash"] = jws.left_hash(access_token.encode("utf-8"), halg)
    idt = IdToken(
        iss=issuer,
        sub=session["sub"],
        aud=session["client_id"],
        exp=time_util.epoch_in_a_while(**inawhile),
        acr=loa,
        iat=time_util.utc_time_sans_frac(),
        **_args,
    )
    for key, val in extra.items():
        idt[key] = val
    if "nonce" in session:
        idt["nonce"] = session["nonce"]
    return idt
def scope2claims(scopes, extra_scope_dict=None):
    """Map scope values to the claims they are shorthand for.

    :param scopes: Iterable of scope names (e.g. "profile", "email").
    :param extra_scope_dict: Optional extra scope -> claim-names mapping,
        merged over the standard SCOPE2CLAIMS table.
    :return: Dict with one key per implied claim, each mapped to None
        (meaning "requested, no specific value").
    """
    res: Dict[str, None] = {}
    # Construct the scope translation map
    trans_map: Dict[str, Any] = SCOPE2CLAIMS.copy()
    if extra_scope_dict is not None:
        trans_map.update(extra_scope_dict)
    for scope in scopes:
        try:
            # dict.fromkeys replaces the hand-rolled (name, None) pairs.
            res.update(dict.fromkeys(trans_map[scope]))
        except KeyError:
            # Unknown scopes are silently ignored.
            continue
    return res
| ./CrossVul/dataset_final_sorted/CWE-347/py/bad_4360_0 |
crossvul-python_data_bad_4085_2 | #!/usr/bin/env python
# Copyright 2012 - 2017, New York University and the TUF contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
"""
<Program Name>
sig.py
<Author>
Vladimir Diaz <vladimir.v.diaz@gmail.com>
<Started>
February 28, 2012. Based on a previous version by Geremy Condra.
<Copyright>
See LICENSE-MIT OR LICENSE for licensing information.
<Purpose>
Survivable key compromise is one feature of a secure update system
incorporated into TUF's design. Responsibility separation through
the use of multiple roles, multi-signature trust, and explicit and
implicit key revocation are some of the mechanisms employed towards
this goal of survivability. These mechanisms can all be seen in
play by the functions available in this module.
The signed metadata files utilized by TUF to download target files
securely are used and represented here as the 'signable' object.
More precisely, the signature structures contained within these metadata
files are packaged into 'signable' dictionaries. This module makes it
possible to capture the states of these signatures by organizing the
keys into different categories. As keys are added and removed, the
system must securely and efficiently verify the status of these signatures.
For instance, a bunch of keys have recently expired. How many valid keys
are now available to the Snapshot role? This question can be answered by
get_signature_status(), which will return a full 'status report' of these
'signable' dicts. This module also provides a convenient verify() function
that will determine if a role still has a sufficient number of valid keys.
If a caller needs to update the signatures of a 'signable' object, there
is also a function for that.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import tuf
import tuf.keydb
import tuf.roledb
import tuf.formats
import securesystemslib
# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.sig')
# Disable 'iso8601' logger messages to prevent 'iso8601' from clogging the
# log file.
iso8601_logger = logging.getLogger('iso8601')
iso8601_logger.disabled = True
def get_signature_status(signable, role=None, repository_name='default',
                         threshold=None, keyids=None):
    """
    <Purpose>
      Return a dictionary representing the status of the signatures listed in
      'signable'.  Given an object conformant to SIGNABLE_SCHEMA, a set of public
      keys in 'tuf.keydb', a set of roles in 'tuf.roledb', and a role,
      the status of these signatures can be determined.  This method will iterate
      the signatures in 'signable' and enumerate all the keys that are valid,
      invalid, unrecognized, or unauthorized.

    <Arguments>
      signable:
        A dictionary containing a list of signatures and a 'signed' identifier.
        signable = {'signed': 'signer',
                    'signatures': [{'keyid': keyid,
                                    'sig': sig}]}
        Conformant to tuf.formats.SIGNABLE_SCHEMA.

      role:
        TUF role (e.g., 'root', 'targets', 'snapshot').

      threshold:
        Rather than reference the role's threshold as set in tuf.roledb.py, use
        the given 'threshold' to calculate the signature status of 'signable'.
        'threshold' is an integer value that sets the role's threshold value, or
        the minimum number of signatures needed for metadata to be considered
        fully signed.

      keyids:
        Similar to the 'threshold' argument, use the supplied list of 'keyids'
        to calculate the signature status, instead of referencing the keyids
        in tuf.roledb.py for 'role'.

    <Exceptions>
      securesystemslib.exceptions.FormatError, if 'signable' does not have the
      correct format.

      tuf.exceptions.UnknownRoleError, if 'role' is not recognized.

    <Side Effects>
      None.

    <Returns>
      A dictionary representing the status of the signatures in 'signable'.
      Conformant to tuf.formats.SIGNATURESTATUS_SCHEMA.
    """
    # Do the arguments have the correct format?  This check will ensure that
    # arguments have the appropriate number of objects and object types, and that
    # all dict keys are properly named.  Raise
    # 'securesystemslib.exceptions.FormatError' if the check fails.
    tuf.formats.SIGNABLE_SCHEMA.check_match(signable)
    securesystemslib.formats.NAME_SCHEMA.check_match(repository_name)
    if role is not None:
        tuf.formats.ROLENAME_SCHEMA.check_match(role)
    if threshold is not None:
        tuf.formats.THRESHOLD_SCHEMA.check_match(threshold)
    if keyids is not None:
        securesystemslib.formats.KEYIDS_SCHEMA.check_match(keyids)

    # The signature status dictionary returned.
    signature_status = {}
    # The fields of the signature_status dict, where each field stores keyids.  A
    # description of each field:
    #
    # good_sigs = keys confirmed to have produced 'sig' using 'signed', which are
    # associated with 'role';
    #
    # bad_sigs = negation of good_sigs;
    #
    # unknown_sigs = keys not found in the 'keydb' database;
    #
    # untrusted_sigs = keys that are not in the list of keyids associated with
    # 'role';
    #
    # unknown_signing_scheme = signing schemes specified in keys that are
    # unsupported;
    good_sigs = []
    bad_sigs = []
    unknown_sigs = []
    untrusted_sigs = []
    unknown_signing_schemes = []

    # Extract the relevant fields from 'signable' that will allow us to identify
    # the different classes of keys (i.e., good_sigs, bad_sigs, etc.).
    signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8')
    signatures = signable['signatures']

    # Iterate the signatures and enumerate the signature_status fields.
    # (i.e., good_sigs, bad_sigs, etc.).
    # NOTE(review): a keyid may appear several times in these lists when the
    # same key signed (or is listed) more than once in 'signatures'; callers
    # comparing against a threshold must count distinct keyids only.
    for signature in signatures:
        keyid = signature['keyid']

        # Does the signature use an unrecognized key?
        try:
            key = tuf.keydb.get_key(keyid, repository_name)
        except tuf.exceptions.UnknownKeyError:
            unknown_sigs.append(keyid)
            continue

        # Does the signature use an unknown/unsupported signing scheme?
        try:
            valid_sig = securesystemslib.keys.verify_signature(key, signature, signed)
        except securesystemslib.exceptions.UnsupportedAlgorithmError:
            unknown_signing_schemes.append(keyid)
            continue

        # We are now dealing with either a trusted or untrusted key...
        if valid_sig:
            if role is not None:
                # Is this an unauthorized key? (a keyid associated with 'role')
                # Note that if the role is not known, tuf.exceptions.UnknownRoleError
                # is raised here.
                if keyids is None:
                    keyids = tuf.roledb.get_role_keyids(role, repository_name)
                if keyid not in keyids:
                    untrusted_sigs.append(keyid)
                    continue
            # This is an unset role, thus an unknown signature.
            else:
                unknown_sigs.append(keyid)
                continue
            # Identify good/authorized key.
            good_sigs.append(keyid)
        else:
            # This is a bad signature for a trusted key.
            bad_sigs.append(keyid)

    # Retrieve the threshold value for 'role'.  Raise
    # tuf.exceptions.UnknownRoleError if we were given an invalid role.
    if role is not None:
        if threshold is None:
            # Note that if the role is not known, tuf.exceptions.UnknownRoleError is
            # raised here.
            threshold = tuf.roledb.get_role_threshold(
                role, repository_name=repository_name)
        else:
            logger.debug('Not using roledb.py\'s threshold for ' + repr(role))
    else:
        threshold = 0

    # Build the signature_status dict.
    signature_status['threshold'] = threshold
    signature_status['good_sigs'] = good_sigs
    signature_status['bad_sigs'] = bad_sigs
    signature_status['unknown_sigs'] = unknown_sigs
    signature_status['untrusted_sigs'] = untrusted_sigs
    signature_status['unknown_signing_schemes'] = unknown_signing_schemes

    return signature_status
def verify(signable, role, repository_name='default', threshold=None,
           keyids=None):
    """
    <Purpose>
      Verify whether the authorized signatures of 'signable' meet the minimum
      required by 'role'.  Authorized signatures are those with valid keys
      associated with 'role'.  'signable' must conform to SIGNABLE_SCHEMA
      and 'role' must not equal 'None' or be less than zero.

      Only *distinct* keys are counted toward the threshold: multiple
      signatures carrying the same keyid contribute a single vote.

    <Arguments>
      signable:
        A dictionary containing a list of signatures and a 'signed' identifier.
        signable = {'signed':, 'signatures': [{'keyid':, 'method':, 'sig':}]}

      role:
        TUF role (e.g., 'root', 'targets', 'snapshot').

      threshold:
        Rather than reference the role's threshold as set in tuf.roledb.py, use
        the given 'threshold' to calculate the signature status of 'signable'.
        'threshold' is an integer value that sets the role's threshold value, or
        the minimum number of signatures needed for metadata to be considered
        fully signed.

      keyids:
        Similar to the 'threshold' argument, use the supplied list of 'keyids'
        to calculate the signature status, instead of referencing the keyids
        in tuf.roledb.py for 'role'.

    <Exceptions>
      tuf.exceptions.UnknownRoleError, if 'role' is not recognized.

      securesystemslib.exceptions.FormatError, if 'signable' is not formatted
      correctly.

      securesystemslib.exceptions.Error, if an invalid threshold is encountered.

    <Side Effects>
      tuf.sig.get_signature_status() called.  Any exceptions thrown by
      get_signature_status() will be caught here and re-raised.

    <Returns>
      Boolean.  True if the number of good unique signatures >= the role's
      threshold, False otherwise.
    """
    tuf.formats.SIGNABLE_SCHEMA.check_match(signable)
    tuf.formats.ROLENAME_SCHEMA.check_match(role)
    securesystemslib.formats.NAME_SCHEMA.check_match(repository_name)

    # Retrieve the signature status.  tuf.sig.get_signature_status() raises:
    # tuf.exceptions.UnknownRoleError
    # securesystemslib.exceptions.FormatError.  'threshold' and 'keyids' are also
    # validated.
    status = get_signature_status(signable, role, repository_name, threshold, keyids)

    # Retrieve the role's threshold and the authorized keys of 'status'
    threshold = status['threshold']
    good_sigs = status['good_sigs']

    # Does 'status' have the required threshold of signatures?
    # First check for invalid threshold values before returning result.
    # Note: get_signature_status() is expected to verify that 'threshold' is
    # not None or <= 0.
    if threshold is None or threshold <= 0:  # pragma: no cover
        raise securesystemslib.exceptions.Error("Invalid threshold: " + repr(threshold))

    # Count each authorized key once: 'good_sigs' can contain the same keyid
    # several times when 'signatures' lists multiple signatures by one key,
    # and a single key must not be able to satisfy a multi-key threshold.
    return len(set(good_sigs)) >= threshold
def may_need_new_keys(signature_status):
    """
    <Purpose>
      Return true iff downloading a new set of keys might tip this
      signature status over to valid, i.e. iff there is at least one
      unknown or untrusted signature.

    <Arguments>
      signature_status:
        The dictionary returned by tuf.sig.get_signature_status().

    <Exceptions>
      securesystemslib.exceptions.FormatError, if 'signature_status does not have
      the correct format.

    <Side Effects>
      None.

    <Returns>
      A truthy/falsy value (the count of unknown or untrusted signatures
      when non-zero); callers should treat it as a boolean.
    """
    # Raises securesystemslib.exceptions.FormatError when the dict does not
    # match SIGNATURESTATUS_SCHEMA.
    tuf.formats.SIGNATURESTATUS_SCHEMA.check_match(signature_status)

    # Keys behind 'unknown' or 'untrusted' signatures are exactly the ones a
    # fresh key download could turn into good signatures.
    return len(signature_status['unknown_sigs']) or len(signature_status['untrusted_sigs'])
def generate_rsa_signature(signed, rsakey_dict):
    """
    <Purpose>
      Generate a new signature dict presumably to be added to the 'signatures'
      field of 'signable'.  The 'signable' dict is of the form:

        {'signed': 'signer',
         'signatures': [{'keyid': keyid,
                         'method': 'evp',
                         'sig': sig}]}

      The 'signed' argument is needed here for the signing process.
      The 'rsakey_dict' argument is used to generate 'keyid', 'method', and 'sig'.

      The caller should ensure the returned signature is not already in
      'signable'.

    <Arguments>
      signed:
        The data used by 'securesystemslib.keys.create_signature()' to generate
        signatures.  It is stored in the 'signed' field of 'signable'.

      rsakey_dict:
        The RSA key, a 'securesystemslib.formats.RSAKEY_SCHEMA' dictionary.
        Used here to produce 'keyid', 'method', and 'sig'.

    <Exceptions>
      securesystemslib.exceptions.FormatError, if 'rsakey_dict' does not have the
      correct format.

      TypeError, if a private key is not defined for 'rsakey_dict'.

    <Side Effects>
      None.

    <Returns>
      Signature dictionary conformant to securesystemslib.formats.SIGNATURE_SCHEMA.
      Has the form:
      {'keyid': keyid, 'method': 'evp', 'sig': sig}
    """
    # We need 'signed' in canonical JSON format to generate
    # the 'method' and 'sig' fields of the signature.
    signed = securesystemslib.formats.encode_canonical(signed).encode('utf-8')

    # Generate the RSA signature.
    # Raises securesystemslib.exceptions.FormatError and TypeError.
    signature = securesystemslib.keys.create_signature(rsakey_dict, signed)

    return signature
| ./CrossVul/dataset_final_sorted/CWE-347/py/bad_4085_2 |
crossvul-python_data_bad_1889_0 | """ Functions connected to signing and verifying.
Based on the use of xmlsec1 binaries and not the python xmlsec module.
"""
import base64
import hashlib
import itertools
import logging
import os
import six
from uuid import uuid4 as gen_random_key
from time import mktime
from tempfile import NamedTemporaryFile
from subprocess import Popen
from subprocess import PIPE
from importlib_resources import path as _resource_path
from OpenSSL import crypto
import pytz
from six.moves.urllib import parse
import saml2.cryptography.asymmetric
import saml2.cryptography.pki
import saml2.xmldsig as ds
import saml2.data.templates as _data_template
from saml2 import samlp
from saml2 import SamlBase
from saml2 import SAMLError
from saml2 import extension_elements_to_elements
from saml2 import class_name
from saml2 import saml
from saml2 import ExtensionElement
from saml2 import VERSION
from saml2.cert import OpenSSLWrapper
from saml2.extension import pefim
from saml2.extension.pefim import SPCertEnc
from saml2.saml import EncryptedAssertion
from saml2.s_utils import sid
from saml2.s_utils import Unsupported
from saml2.time_util import instant
from saml2.time_util import str_to_time
from saml2.xmldsig import SIG_RSA_SHA1
from saml2.xmldsig import SIG_RSA_SHA224
from saml2.xmldsig import SIG_RSA_SHA256
from saml2.xmldsig import SIG_RSA_SHA384
from saml2.xmldsig import SIG_RSA_SHA512
from saml2.xmlenc import EncryptionMethod
from saml2.xmlenc import EncryptedKey
from saml2.xmlenc import CipherData
from saml2.xmlenc import CipherValue
from saml2.xmlenc import EncryptedData
logger = logging.getLogger(__name__)

# Namespace-qualified tag of the XML-DSig <Signature> element.
SIG = '{{{ns}#}}{attribute}'.format(ns=ds.NAMESPACE, attribute='Signature')

# XML Encryption algorithm identifiers.
RSA_1_5 = 'http://www.w3.org/2001/04/xmlenc#rsa-1_5'
TRIPLE_DES_CBC = 'http://www.w3.org/2001/04/xmlenc#tripledes-cbc'
class SigverError(SAMLError):
    """Base class for all errors raised by this module."""
    pass
class CertificateTooOld(SigverError):
    """A certificate is too old (past its validity period)."""
    pass
class XmlsecError(SigverError):
    """Base class for errors coming from the xmlsec1 operations."""
    pass
class MissingKey(SigverError):
    """A key needed for an operation could not be found."""
    pass
class DecryptError(XmlsecError):
    """Decryption via xmlsec1 failed."""
    pass
class EncryptError(XmlsecError):
    """Encryption via xmlsec1 failed."""
    pass
class SignatureError(XmlsecError):
    """Creating or processing a signature via xmlsec1 failed."""
    pass
class BadSignature(SigverError):
    """The signature is invalid (verification failed)."""
    pass
class CertificateError(SigverError):
    """A certificate is invalid or could not be processed."""
    pass
def read_file(*args, **kwargs):
    """Read and return the full contents of a file.

    All arguments are forwarded verbatim to :func:`open`, so callers may
    pass a path, mode, encoding, etc.
    """
    with open(*args, **kwargs) as source:
        content = source.read()
    return content
def rm_xmltag(statement):
    """Strip a leading XML declaration (and one following newline).

    :param statement: The XML document as ``str`` or ``bytes``; bytes are
        decoded to text first.
    :return: The document without its XML declaration, as ``str``.
    """
    XMLTAG = "<?xml version='1.0'?>"
    PREFIX1 = "<?xml version='1.0' encoding='UTF-8'?>"
    PREFIX2 = '<?xml version="1.0" encoding="UTF-8"?>'

    try:
        statement.startswith(XMLTAG)
    except TypeError:
        # bytes input: work on its decoded text form.
        statement = statement.decode()

    for declaration in (XMLTAG, PREFIX1, PREFIX2):
        if statement.startswith(declaration):
            statement = statement[len(declaration):]
            # Use startswith instead of indexing so that input consisting of
            # only the declaration (empty remainder) cannot raise IndexError.
            if statement.startswith('\n'):
                statement = statement[1:]
            break
    return statement
def signed(item):
    """
    Is any part of the document signed ?

    :param item: A Samlbase instance
    :return: True if the item itself or any descendant carries a signature
    """
    if SIG in item.c_children.keys() and item.signature:
        return True

    for attr_name in item.c_child_order:
        value = getattr(item, attr_name, None)
        if isinstance(value, list):
            if any(signed(entry) for entry in value):
                return True
        elif value and signed(value):
            return True

    return False
def get_xmlsec_binary(paths=None):
    """
    Tries to find the xmlsec1 binary.

    :param paths: Non-system path paths which should be searched when
        looking for xmlsec1
    :return: full name of the xmlsec1 binary found. If no binaries are
        found then an exception is raised.
    """
    # candidate executable names per platform
    if os.name == 'nt':
        bin_name = ['xmlsec.exe', 'xmlsec1.exe']
    else:
        bin_name = ['xmlsec1']

    # explicit paths first: for each name, scan all given directories
    if paths:
        for candidate in bin_name:
            for directory in paths:
                full = os.path.join(directory, candidate)
                try:
                    os.lstat(full)
                except OSError:
                    continue
                return full

    # then fall back to the directories on $PATH
    for directory in os.environ['PATH'].split(os.pathsep):
        for candidate in bin_name:
            full = os.path.join(directory, candidate)
            try:
                os.lstat(full)
            except OSError:
                continue
            return full

    raise SigverError('Cannot find {binary}'.format(binary=bin_name))
def _get_xmlsec_cryptobackend(path=None, search_paths=None, delete_tmpfiles=True):
    """
    Initialize a CryptoBackendXmlSec1 crypto backend.

    This function is now internal to this module.
    """
    binary = path if path is not None else get_xmlsec_binary(paths=search_paths)
    return CryptoBackendXmlSec1(binary, delete_tmpfiles=delete_tmpfiles)
# Node names / class identifiers handed to xmlsec when signing,
# verifying and decrypting SAML assertions.
NODE_NAME = 'urn:oasis:names:tc:SAML:2.0:assertion:Assertion'
ENC_NODE_NAME = 'urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAssertion'
ENC_KEY_CLASS = 'EncryptedKey'
def _make_vals(val, klass, seccont, klass_inst=None, prop=None, part=False,
               base64encode=False, elements_to_sign=None):
    """
    Creates a class instance with a specified value, the specified
    class instance may be a value on a property in a defined class instance.

    :param val: The value
    :param klass: The value class
    :param seccont: The security context (forwarded to _instance)
    :param klass_inst: The class instance which has a property on which
        what this function returns is a value.
    :param prop: The property which the value should be assigned to.
    :param part: If the value is one of a possible list of values it should be
        handled slightly different compared to if it isn't.
    :param base64encode: Whether text content should be base64 encoded
    :param elements_to_sign: Accumulator list for elements that request
        signing (filled in by _instance)
    :return: Value class instance
    """
    cinst = None

    # a dict describes a complete sub-element: build it recursively
    if isinstance(val, dict):
        cinst = _instance(klass, val, seccont, base64encode=base64encode,
                          elements_to_sign=elements_to_sign)
    else:
        try:
            cinst = klass().set_text(val)
        except ValueError:
            # set_text rejected the value: assume it is a sequence of
            # values and build one instance per item
            if not part:
                cis = [
                    _make_vals(
                        sval,
                        klass,
                        seccont,
                        klass_inst,
                        prop,
                        True,
                        base64encode,
                        elements_to_sign)
                    for sval in val
                ]
                setattr(klass_inst, prop, cis)
            else:
                # nested sequences are not supported here; propagate
                raise

    if part:
        return cinst
    else:
        # attach the single instance to the parent as a one-element list
        if cinst:
            cis = [cinst]
            setattr(klass_inst, prop, cis)
def _instance(klass, ava, seccont, base64encode=False, elements_to_sign=None):
    """Build a *klass* instance from the attribute/value dict *ava*.

    Recognized keys are the class' XML attributes and children, plus
    'text', 'extension_elements', 'extension_attributes' and 'signature'
    (the latter only records the element in *elements_to_sign*; no
    signing happens here).
    """
    instance = klass()

    # plain XML attributes; booleans/ints are stringified first
    for prop in instance.c_attributes.values():
        if prop in ava:
            if isinstance(ava[prop], bool):
                setattr(instance, prop, str(ava[prop]).encode())
            elif isinstance(ava[prop], int):
                setattr(instance, prop, str(ava[prop]))
            else:
                setattr(instance, prop, ava[prop])

    if 'text' in ava:
        instance.set_text(ava['text'], base64encode)

    # child elements, possibly lists of them
    for prop, klassdef in instance.c_children.values():
        if prop in ava:
            if isinstance(klassdef, list):
                # means there can be a list of values
                _make_vals(ava[prop], klassdef[0], seccont, instance, prop,
                           base64encode=base64encode,
                           elements_to_sign=elements_to_sign)
            else:
                cis = _make_vals(ava[prop], klassdef, seccont, instance, prop,
                                 True, base64encode, elements_to_sign)
                setattr(instance, prop, cis)

    if 'extension_elements' in ava:
        for item in ava['extension_elements']:
            instance.extension_elements.append(
                ExtensionElement(item['tag']).loadd(item))

    if 'extension_attributes' in ava:
        for key, val in ava['extension_attributes'].items():
            instance.extension_attributes[key] = val

    # remember this element so it can be signed later on
    if 'signature' in ava:
        elements_to_sign.append((class_name(instance), instance.id))

    return instance
# XXX will actually sign the nodes
# XXX assumes pre_signature_part has already been called
# XXX calls sign without specifying sign_alg/digest_alg
# XXX this is fine as the algs are embedded in the document
# XXX as set up by pre_signature_part
# XXX !!expects instance string!!
def signed_instance_factory(instance, seccont, elements_to_sign=None):
    """
    :param instance: The instance to be signed or not
    :param seccont: The security context
    :param elements_to_sign: Which parts if any that should be signed
    :return: A class instance if not signed otherwise a string
    """
    if not elements_to_sign:
        return instance

    xml_text = (
        instance
        if isinstance(instance, six.string_types)
        else instance.to_string()
    )
    for node_name, node_id in elements_to_sign:
        xml_text = seccont.sign_statement(
            xml_text, node_name=node_name, node_id=node_id
        )
    return xml_text
def make_temp(content, suffix="", decode=True, delete_tmpfiles=True):
    """
    Create a temporary file with the given content.

    This is needed by xmlsec in some cases where only strings exist when
    files are expected.

    :param content: The information to be placed in the file
    :param suffix: The temporary file might have to have a specific
        suffix in certain circumstances.
    :param decode: The input content might be base64 coded. If so it
        must, in some cases, be decoded before being placed in the file.
    :param delete_tmpfiles: Whether to keep the tmp files or delete them
        when they are no longer in use
    :return: an open NamedTemporaryFile, rewound to offset 0, so the
        caller can both close it and read its ``name`` for xmlsec
    """
    # normalize to bytes first (six.binary_type is bytes)
    data = content if isinstance(content, bytes) else content.encode("utf-8")
    if decode:
        data = base64.b64decode(data)
    handle = NamedTemporaryFile(suffix=suffix, delete=delete_tmpfiles)
    handle.write(data)
    handle.seek(0)
    return handle
def split_len(seq, length):
    """Return *seq* cut into consecutive slices of at most *length* items."""
    slices = (seq[pos:pos + length] for pos in range(0, len(seq), length))
    return list(slices)
# strptime pattern for certificate validity timestamps,
# e.g. 'Apr 12 09:30:00 2021'.
M2_TIME_FORMAT = '%b %d %H:%M:%S %Y'
def to_time(_time):
    """Convert a '<Mon DD HH:MM:SS YYYY> GMT' string to an epoch value.

    :param _time: timestamp string; must end with ' GMT'
    :return: result of mktime() on the parsed time
    :raises ValueError: if the string does not end with ' GMT'
    """
    if not _time.endswith(' GMT'):
        raise ValueError('Time does not end with GMT')
    _time = _time[:-4]
    return mktime(str_to_time(_time, M2_TIME_FORMAT))
def active_cert(key):
    """
    Verifies that a key is active that is present time is after not_before
    and before not_after.

    :param key: The Key (base64 certificate body, without PEM markers)
    :return: True if the key is active else False
    """
    try:
        cert_str = pem_format(key)
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
    except AttributeError:
        # key was not a string-like object
        return False
    # NOTE(review): parse errors other than AttributeError propagate to
    # the caller — confirm that is intended.
    now = pytz.UTC.localize(datetime.datetime.utcnow())
    valid_from = dateutil.parser.parse(cert.get_notBefore())
    valid_to = dateutil.parser.parse(cert.get_notAfter())
    active = not cert.has_expired() and valid_from <= now < valid_to
    return active
def cert_from_key_info(key_info, ignore_age=False):
    """ Get all X509 certs from a KeyInfo instance. Care is taken to make sure
    that the certs are continues sequences of bytes.

    All certificates appearing in an X509Data element MUST relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.

    :param key_info: The KeyInfo instance
    :param ignore_age: Keep certificates regardless of their validity period
    :return: A possibly empty list of certs
    """
    res = []
    for x509_data in key_info.x509_data:
        x509_certificate = x509_data.x509_certificate
        cert = x509_certificate.text.strip()
        # re-flow the base64 payload into 64-character lines
        cert = '\n'.join(split_len(''.join([s.strip() for s in
                                            cert.split()]), 64))
        if ignore_age or active_cert(cert):
            res.append(cert)
        else:
            logger.info('Inactive cert')
    return res
def cert_from_key_info_dict(key_info, ignore_age=False):
    """ Get all X509 certs from a KeyInfo dictionary. Care is taken to make
    sure that the certs are continues sequences of bytes.

    All certificates appearing in an X509Data element MUST relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.

    :param key_info: The KeyInfo dictionary
    :param ignore_age: Keep certificates regardless of their validity period
    :return: A possibly empty list of certs in their text representation
    """
    certs = []
    if 'x509_data' not in key_info:
        return certs

    for x509_data in key_info['x509_data']:
        raw_text = x509_data['x509_certificate']['text'].strip()
        # re-flow the base64 payload into 64-character lines
        normalized = ''.join(part.strip() for part in raw_text.split())
        cert = '\n'.join(split_len(normalized, 64))
        if ignore_age or active_cert(cert):
            certs.append(cert)
        else:
            logger.info('Inactive cert')

    return certs
def cert_from_instance(instance):
    """ Find certificates that are part of an instance

    :param instance: An instance
    :return: possible empty list of certificates
    """
    signature = instance.signature
    if signature and signature.key_info:
        return cert_from_key_info(signature.key_info, ignore_age=True)
    return []
def extract_rsa_key_from_x509_cert(pem):
    """Return the public key object of a PEM-encoded X.509 certificate."""
    cert = saml2.cryptography.pki.load_pem_x509_certificate(pem)
    return cert.public_key()
def pem_format(key):
    """Wrap a base64 certificate body in PEM BEGIN/END markers (ASCII bytes)."""
    wrapped = [
        '-----BEGIN CERTIFICATE-----',
        key,
        '-----END CERTIFICATE-----',
    ]
    return os.linesep.join(wrapped).encode('ascii')
def import_rsa_key_from_file(filename):
    """Load an unencrypted PEM private key from *filename* (password None)."""
    data = read_file(filename, 'rb')
    key = saml2.cryptography.asymmetric.load_pem_private_key(data, None)
    return key
def parse_xmlsec_output(output):
    """ Parse the output from xmlsec to try to find out if the
    command was successfull or not.

    :param output: The output from Popen
    :return: A boolean; True if the command was a success otherwise False
    """
    for line in output.splitlines():
        if line == 'OK':
            return True
        if line == 'FAIL':
            break
    raise XmlsecError(output)
def sha1_digest(msg):
    """Return the raw SHA-1 digest of *msg* (bytes)."""
    hasher = hashlib.sha1(msg)
    return hasher.digest()
class Signer(object):
    """Abstract base class for signing algorithms."""
    def __init__(self, key):
        # default key used when sign()/verify() get no explicit key
        self.key = key
    def sign(self, msg, key):
        """Sign ``msg`` with ``key`` and return the signature."""
        raise NotImplementedError
    def verify(self, msg, sig, key):
        """Return True if ``sig`` is a valid signature for ``msg``."""
        raise NotImplementedError
class RSASigner(Signer):
    """RSA signer/verifier bound to one digest algorithm."""
    def __init__(self, digest, key=None):
        Signer.__init__(self, key)
        # hash algorithm instance used for both signing and verification
        self.digest = digest
    def sign(self, msg, key=None):
        """Sign *msg* with *key* (or the default key) and return the signature."""
        return saml2.cryptography.asymmetric.key_sign(
            key or self.key, msg, self.digest)
    def verify(self, msg, sig, key=None):
        """Return whether *sig* is a valid signature of *msg*."""
        return saml2.cryptography.asymmetric.key_verify(
            key or self.key, sig, msg, self.digest)
# Map from XML-DSig RSA signature algorithm URI to a shared RSASigner
# instance configured with the matching digest.
# NOTE(review): these are module-level singletons whose ``key`` attribute
# is mutated by RSACrypto.get_signer — not safe for concurrent use.
SIGNER_ALGS = {
    SIG_RSA_SHA1: RSASigner(saml2.cryptography.asymmetric.hashes.SHA1()),
    SIG_RSA_SHA224: RSASigner(saml2.cryptography.asymmetric.hashes.SHA224()),
    SIG_RSA_SHA256: RSASigner(saml2.cryptography.asymmetric.hashes.SHA256()),
    SIG_RSA_SHA384: RSASigner(saml2.cryptography.asymmetric.hashes.SHA384()),
    SIG_RSA_SHA512: RSASigner(saml2.cryptography.asymmetric.hashes.SHA512()),
}
# Query parameter order used when rebuilding the string that was signed
# for the HTTP-Redirect binding (see verify_redirect_signature).
REQ_ORDER = [
    'SAMLRequest',
    'RelayState',
    'SigAlg',
]
RESP_ORDER = [
    'SAMLResponse',
    'RelayState',
    'SigAlg',
]
class RSACrypto(object):
    """Holds a default RSA key and hands out signers per signature algorithm."""
    def __init__(self, key):
        # default signing/verification key
        self.key = key
    def get_signer(self, sigalg, sigkey=None):
        """Return the signer registered for *sigalg* with its key set,
        or None when the algorithm is not supported.

        NOTE(review): this mutates the shared signer instance stored in
        SIGNER_ALGS (sets its ``key``), so concurrent calls with
        different keys are unsafe — confirm callers are serialized.
        """
        try:
            signer = SIGNER_ALGS[sigalg]
        except KeyError:
            return None
        else:
            if sigkey:
                signer.key = sigkey
            else:
                signer.key = self.key
            return signer
def verify_redirect_signature(saml_msg, crypto, cert=None, sigkey=None):
    """
    Verify the signature of a SAML HTTP-Redirect binding message.

    :param saml_msg: A dictionary with strings as values, *NOT* lists as
        produced by parse_qs.
    :param crypto: RSACrypto instance used to look up the signer
    :param cert: A certificate to use when verifying the signature
    :param sigkey: Key to verify with when no certificate is given
    :return: True, if signature verified
    :raises Unsupported: when SigAlg is missing or the message carries
        neither SAMLRequest nor SAMLResponse
    """
    # BUG FIX: previously a missing 'SigAlg' triggered KeyError inside the
    # except handler (the Unsupported message looked the key up again), so
    # callers saw KeyError instead of Unsupported.
    sig_alg = saml_msg.get('SigAlg')
    if sig_alg is None:
        raise Unsupported('Signature algorithm: {alg}'.format(alg=sig_alg))

    signer = crypto.get_signer(sig_alg, sigkey)

    if sig_alg in SIGNER_ALGS:
        if 'SAMLRequest' in saml_msg:
            _order = REQ_ORDER
        elif 'SAMLResponse' in saml_msg:
            _order = RESP_ORDER
        else:
            raise Unsupported(
                'Verifying signature on something that should not be signed'
            )

        # rebuild the exact signed string: the URL-encoded parameters
        # joined in protocol order, without the signature itself
        _args = saml_msg.copy()
        del _args['Signature']  # everything but the signature
        string = '&'.join(
            [
                parse.urlencode({k: _args[k]})
                for k in _order
                if k in _args
            ]
        ).encode('ascii')

        if cert:
            _key = extract_rsa_key_from_x509_cert(pem_format(cert))
        else:
            _key = sigkey
        _sign = base64.b64decode(saml_msg['Signature'])

        return bool(signer.verify(string, _sign, _key))
def make_str(txt):
    """Return *txt* as text, decoding it first when it is a bytes object."""
    return txt if isinstance(txt, six.string_types) else txt.decode()
def read_cert_from_file(cert_file, cert_type):
    """ Reads a certificate from a file. The assumption is that there is
    only one certificate in the file

    :param cert_file: The name of the file
    :param cert_type: The certificate type
    :return: A base64 encoded certificate as a string or the empty string
    """
    if not cert_file:
        return ''

    if cert_type == 'pem':
        text = read_file(cert_file, 'rb').decode()
        lines = text.replace('\r\n', '\n').split('\n')

        # keep only the payload between the BEGIN/END markers
        begin_markers = (
            '-----BEGIN CERTIFICATE-----',
            '-----BEGIN PUBLIC KEY-----',
        )
        for marker in begin_markers:
            if marker in lines:
                lines = lines[lines.index(marker) + 1:]
                break
        else:
            raise CertificateError('Strange beginning of PEM file')

        end_markers = (
            '-----END CERTIFICATE-----',
            '-----END PUBLIC KEY-----',
        )
        for marker in end_markers:
            if marker in lines:
                lines = lines[:lines.index(marker)]
                break
        else:
            raise CertificateError('Strange end of PEM file')

        return make_str(''.join(lines).encode())

    if cert_type in ('der', 'cer', 'crt'):
        der_data = read_file(cert_file, 'rb')
        return make_str(base64.b64encode(der_data))
class CryptoBackend(object):
    """Interface for backends doing XML signing/encryption work."""
    def version(self):
        """Return the backend's version string."""
        raise NotImplementedError()
    def encrypt(self, text, recv_key, template, key_type):
        """Encrypt *text* for *recv_key* using the given template."""
        raise NotImplementedError()
    def encrypt_assertion(self, statement, enc_key, template, key_type, node_xpath):
        """Encrypt the assertion found at *node_xpath* in *statement*."""
        raise NotImplementedError()
    def decrypt(self, enctext, key_file):
        """Decrypt *enctext* with the private key in *key_file*."""
        raise NotImplementedError()
    def sign_statement(self, statement, node_name, key_file, node_id):
        """Sign the *node_name* element (optionally *node_id*) in *statement*."""
        raise NotImplementedError()
    def validate_signature(self, enctext, cert_file, cert_type, node_name, node_id):
        """Validate the signature on *enctext* against *cert_file*."""
        raise NotImplementedError()
# Namespace-agnostic XPath selecting the Assertion to encrypt:
# /*[local-name()='Response']/*[local-name()='EncryptedAssertion']
#     /*[local-name()='Assertion']
ASSERT_XPATH = ''.join([
    '/*[local-name()=\'{name}\']'.format(name=n)
    for n in ['Response', 'EncryptedAssertion', 'Assertion']
])
class CryptoBackendXmlSec1(CryptoBackend):
    """
    CryptoBackend implementation using the external xmlsec1 binary to
    sign, verify, encrypt and decrypt XML documents.
    """

    # legacy debug flag; not referenced within this class
    __DEBUG = 0

    def __init__(self, xmlsec_binary, delete_tmpfiles=True, **kwargs):
        """
        :param xmlsec_binary: Full path to the xmlsec1 executable
        :param delete_tmpfiles: Whether temporary files handed to xmlsec1
            are removed once they are no longer referenced
        :param kwargs: May contain 'rsa_key', used to set up a non-XML
            RSA helper (self.non_xml_crypto)
        """
        CryptoBackend.__init__(self, **kwargs)
        if not isinstance(xmlsec_binary, six.string_types):
            raise ValueError("xmlsec_binary should be of type string")
        self.xmlsec = xmlsec_binary
        self.delete_tmpfiles = delete_tmpfiles
        try:
            self.non_xml_crypto = RSACrypto(kwargs['rsa_key'])
        except KeyError:
            # no rsa_key supplied; non_xml_crypto stays unset
            pass

    def version(self):
        """Return the version reported by ``xmlsec --version`` (or '')."""
        com_list = [self.xmlsec, '--version']
        pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
        content, _ = pof.communicate()
        content = content.decode('ascii')
        try:
            return content.split(' ')[1]
        except IndexError:
            return ''

    def encrypt(self, text, recv_key, template, session_key_type, xpath=''):
        """
        :param text: The text to be compiled
        :param recv_key: Filename of a file where the key resides
        :param template: Filename of a file with the pre-encryption part
        :param session_key_type: Type and size of a new session key
            'des-192' generates a new 192 bits DES key for DES3 encryption
        :param xpath: What should be encrypted
        :return: The encrypted XML text
        :raises EncryptError: when xmlsec1 fails
        """
        logger.debug('Encryption input len: %d', len(text))
        tmp = make_temp(text, decode=False, delete_tmpfiles=self.delete_tmpfiles)
        com_list = [
            self.xmlsec,
            '--encrypt',
            '--pubkey-cert-pem', recv_key,
            '--session-key', session_key_type,
            '--xml-data', tmp.name,
        ]
        if xpath:
            com_list.extend(['--node-xpath', xpath])

        try:
            (_stdout, _stderr, output) = self._run_xmlsec(com_list, [template])
        except XmlsecError as e:
            six.raise_from(EncryptError(com_list), e)

        return output

    def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None, node_id=None):
        """
        Will encrypt an assertion

        :param statement: A XML document that contains the assertion to encrypt
        :param enc_key: File name of a file containing the encryption key
        :param template: A template for the encryption part to be added.
        :param key_type: The type of session key to use.
        :param node_xpath: XPath of the node to encrypt (defaults to the
            Response/EncryptedAssertion/Assertion path)
        :param node_id: Optional id of the node to encrypt
        :return: The encrypted text
        :raises EncryptError: when xmlsec1 fails
        """
        if isinstance(statement, SamlBase):
            statement = pre_encrypt_assertion(statement)

        tmp = make_temp(str(statement),
                        decode=False,
                        delete_tmpfiles=self.delete_tmpfiles)
        tmp2 = make_temp(str(template),
                         decode=False,
                         delete_tmpfiles=self.delete_tmpfiles)

        if not node_xpath:
            node_xpath = ASSERT_XPATH

        com_list = [
            self.xmlsec,
            '--encrypt',
            '--pubkey-cert-pem', enc_key,
            '--session-key', key_type,
            '--xml-data', tmp.name,
            '--node-xpath', node_xpath,
        ]
        if node_id:
            com_list.extend(['--node-id', node_id])

        try:
            (_stdout, _stderr, output) = self._run_xmlsec(com_list, [tmp2.name])
        except XmlsecError as e:
            six.raise_from(EncryptError(com_list), e)

        return output.decode('utf-8')

    def decrypt(self, enctext, key_file):
        """
        :param enctext: XML document containing an encrypted part
        :param key_file: The key to use for the decryption
        :return: The decrypted document
        :raises DecryptError: when xmlsec1 fails
        """
        logger.debug('Decrypt input len: %d', len(enctext))
        tmp = make_temp(enctext, decode=False, delete_tmpfiles=self.delete_tmpfiles)
        com_list = [
            self.xmlsec,
            '--decrypt',
            '--privkey-pem', key_file,
            '--id-attr:Id', ENC_KEY_CLASS,
        ]

        try:
            (_stdout, _stderr, output) = self._run_xmlsec(com_list, [tmp.name])
        except XmlsecError as e:
            six.raise_from(DecryptError(com_list), e)

        return output.decode('utf-8')

    def sign_statement(self, statement, node_name, key_file, node_id):
        """
        Sign an XML statement.

        :param statement: The statement to be signed
        :param node_name: string like 'urn:oasis:names:...:Assertion'
        :param key_file: The file where the key can be found
        :param node_id:
        :return: The signed statement
        :raises SignatureError: when xmlsec1 fails or produces no output
        """
        if isinstance(statement, SamlBase):
            statement = str(statement)

        tmp = make_temp(statement,
                        suffix=".xml",
                        decode=False,
                        delete_tmpfiles=self.delete_tmpfiles)

        com_list = [
            self.xmlsec,
            '--sign',
            '--privkey-pem', key_file,
            '--id-attr:ID', node_name,
        ]
        if node_id:
            com_list.extend(['--node-id', node_id])

        try:
            (stdout, stderr, output) = self._run_xmlsec(com_list, [tmp.name])
        except XmlsecError as e:
            # BUG FIX: chain the underlying XmlsecError like the sibling
            # methods do (was a bare raise that discarded the cause)
            six.raise_from(SignatureError(com_list), e)

        # this does not work if --store-signatures is used
        if output:
            return output.decode("utf-8")
        if stdout:
            return stdout.decode("utf-8")
        raise SignatureError(stderr)

    def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id):
        """
        Validate signature on XML document.

        :param signedtext: The XML document as a string
        :param cert_file: The public key that was used to sign the document
        :param cert_type: The file type of the certificate
        :param node_name: The name of the class that is signed
        :param node_id: The identifier of the node
        :return: Boolean True if the signature was correct otherwise False.
        :raises SignatureError: when xmlsec1 could not be run successfully
        """
        if not isinstance(signedtext, six.binary_type):
            signedtext = signedtext.encode('utf-8')

        tmp = make_temp(signedtext,
                        suffix=".xml",
                        decode=False,
                        delete_tmpfiles=self.delete_tmpfiles)

        com_list = [
            self.xmlsec,
            '--verify',
            '--enabled-reference-uris', 'empty,same-doc',
            '--pubkey-cert-{type}'.format(type=cert_type), cert_file,
            '--id-attr:ID', node_name,
        ]
        if node_id:
            com_list.extend(['--node-id', node_id])

        try:
            (_stdout, stderr, _output) = self._run_xmlsec(com_list, [tmp.name])
        except XmlsecError as e:
            six.raise_from(SignatureError(com_list), e)

        # xmlsec writes its OK/FAIL verdict to stderr
        return parse_xmlsec_output(stderr)

    def _run_xmlsec(self, com_list, extra_args):
        """
        Common code to invoke xmlsec and parse the output.

        :param com_list: Key-value parameter list for xmlsec
        :param extra_args: Positional parameters to be appended after all
            key-value parameters
        :result: Whatever xmlsec wrote to an --output temporary file
        :raises XmlsecError: on a non-zero xmlsec exit status
        """
        with NamedTemporaryFile(suffix='.xml') as ntf:
            com_list.extend(['--output', ntf.name])
            com_list += extra_args

            logger.debug('xmlsec command: %s', ' '.join(com_list))

            pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
            p_out, p_err = pof.communicate()
            p_out = p_out.decode()
            p_err = p_err.decode()

            if pof.returncode != 0:
                errmsg = "returncode={code}\nerror={err}\noutput={out}".format(
                    code=pof.returncode, err=p_err, out=p_out
                )
                logger.error(errmsg)
                raise XmlsecError(errmsg)

            ntf.seek(0)
            return p_out, p_err, ntf.read()
class CryptoBackendXMLSecurity(CryptoBackend):
    """
    CryptoBackend implementation using pyXMLSecurity to sign and verify
    XML documents.

    Encrypt and decrypt is currently unsupported by pyXMLSecurity.

    pyXMLSecurity uses lxml (libxml2) to parse XML data, but otherwise
    try to get by with native Python code. It does native Python RSA
    signatures, or alternatively PyKCS11 to offload cryptographic work
    to an external PKCS#11 module.
    """
    def __init__(self):
        CryptoBackend.__init__(self)
    def version(self):
        # XXX if XMLSecurity.__init__ included a __version__, that would be
        # better than static 0.0 here.
        return 'XMLSecurity 0.0'
    def sign_statement(self, statement, node_name, key_file, node_id):
        """
        Sign an XML statement.

        The parameters actually used in this CryptoBackend
        implementation are :

        :param statement: XML as string
        :param node_name: Name of the node to sign
        :param key_file: xmlsec key_spec string(), filename,
            'pkcs11://' URI or PEM data
        :returns: Signed XML as string
        """
        # imported lazily so the dependency is only needed when this
        # backend is actually used
        import xmlsec
        import lxml.etree
        xml = xmlsec.parse_xml(statement)
        signed = xmlsec.sign(xml, key_file)
        signed_str = lxml.etree.tostring(signed, xml_declaration=False, encoding="UTF-8")
        if not isinstance(signed_str, six.string_types):
            signed_str = signed_str.decode("utf-8")
        return signed_str
    def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id):
        """
        Validate signature on XML document.

        The parameters actually used in this CryptoBackend
        implementation are :

        :param signedtext: The signed XML data as string
        :param cert_file: xmlsec key_spec string(), filename,
            'pkcs11://' URI or PEM data
        :param cert_type: string, must be 'pem' for now
        :returns: True on successful validation, False otherwise
        """
        if cert_type != 'pem':
            raise Unsupported('Only PEM certs supported here')
        # lazy import, see sign_statement
        import xmlsec
        xml = xmlsec.parse_xml(signedtext)
        try:
            return xmlsec.verify(xml, cert_file)
        except xmlsec.XMLSigException:
            return False
def security_context(conf):
    """ Creates a security context based on the configuration

    :param conf: The configuration, this is a Config instance
    :return: A SecurityContext instance, or None when *conf* is falsy
    """
    if not conf:
        return None

    try:
        metadata = conf.metadata
    except AttributeError:
        metadata = None

    sec_backend = None

    if conf.crypto_backend == 'xmlsec1':
        xmlsec_binary = conf.xmlsec_binary

        if not xmlsec_binary:
            try:
                _path = conf.xmlsec_path
            except AttributeError:
                _path = []
            xmlsec_binary = get_xmlsec_binary(_path)

        # verify that xmlsec is where it's supposed to be
        if not os.path.exists(xmlsec_binary):
            # if not os.access(, os.F_OK):
            err_msg = 'xmlsec binary not found: {binary}'
            err_msg = err_msg.format(binary=xmlsec_binary)
            raise SigverError(err_msg)

        crypto = _get_xmlsec_cryptobackend(xmlsec_binary,
                                           delete_tmpfiles=conf.delete_tmpfiles)

        # build the RSA backend (sec_backend) from the configured key file
        _file_name = conf.getattr('key_file', '')
        if _file_name:
            try:
                rsa_key = import_rsa_key_from_file(_file_name)
            except Exception as err:
                logger.error('Cannot import key from {file}: {err_msg}'.format(
                    file=_file_name, err_msg=err))
                raise
            else:
                sec_backend = RSACrypto(rsa_key)
    elif conf.crypto_backend == 'XMLSecurity':
        # new and somewhat untested pyXMLSecurity crypto backend.
        crypto = CryptoBackendXMLSecurity()
    else:
        err_msg = 'Unknown crypto_backend {backend}'
        err_msg = err_msg.format(backend=conf.crypto_backend)
        raise SigverError(err_msg)

    # collect the private key files configured for decryption
    enc_key_files = []
    if conf.encryption_keypairs is not None:
        for _encryption_keypair in conf.encryption_keypairs:
            if 'key_file' in _encryption_keypair:
                enc_key_files.append(_encryption_keypair['key_file'])

    return SecurityContext(
        crypto,
        conf.key_file,
        cert_file=conf.cert_file,
        metadata=metadata,
        only_use_keys_in_metadata=conf.only_use_keys_in_metadata,
        cert_handler_extra_class=conf.cert_handler_extra_class,
        generate_cert_info=conf.generate_cert_info,
        tmp_cert_file=conf.tmp_cert_file,
        tmp_key_file=conf.tmp_key_file,
        validate_certificate=conf.validate_certificate,
        enc_key_files=enc_key_files,
        encryption_keypairs=conf.encryption_keypairs,
        sec_backend=sec_backend,
        delete_tmpfiles=conf.delete_tmpfiles)
def encrypt_cert_from_item(item):
    """Extract an encryption certificate from an SPCertEnc extension on *item*.

    Searches item.extensions (falling back to item.extension_elements) for
    a pefim SPCertEnc element and returns the first X509 certificate text
    found in its KeyInfo, wrapped in PEM BEGIN/END markers. Returns None
    when no certificate can be found; lookup failures are deliberately
    swallowed (best effort).
    """
    _encrypt_cert = None
    try:
        try:
            _elem = extension_elements_to_elements(
                item.extensions.extension_elements, [pefim, ds])
        except Exception:  # was a bare except: fall back to raw extension elements
            _elem = extension_elements_to_elements(
                item.extension_elements[0].children,
                [pefim, ds])

        for _tmp_elem in _elem:
            if isinstance(_tmp_elem, SPCertEnc):
                for _tmp_key_info in _tmp_elem.key_info:
                    if (_tmp_key_info.x509_data is not None
                            and len(_tmp_key_info.x509_data) > 0):
                        _encrypt_cert = (
                            _tmp_key_info.x509_data[0].x509_certificate.text)
                        break
    except Exception:
        # best effort: an item without the extension simply yields None
        pass

    if _encrypt_cert is not None:
        if _encrypt_cert.find('-----BEGIN CERTIFICATE-----\n') == -1:
            _encrypt_cert = '-----BEGIN CERTIFICATE-----\n' + _encrypt_cert
        if _encrypt_cert.find('\n-----END CERTIFICATE-----') == -1:
            _encrypt_cert = _encrypt_cert + '\n-----END CERTIFICATE-----'
    return _encrypt_cert
class CertHandlerExtra(object):
    """Plug-in point for custom certificate generation and validation."""
    def __init__(self):
        pass
    def use_generate_cert_func(self):
        """Return True when generate_cert() should be used."""
        raise Exception('use_generate_cert_func function must be implemented')
    def generate_cert(self, generate_cert_info, root_cert_string,
                      root_key_string):
        raise Exception('generate_cert function must be implemented')
        # Expects to return (cert_string, key_string)
    def use_validate_cert_func(self):
        """Return True when validate_cert() should be used."""
        raise Exception('use_validate_cert_func function must be implemented')
    def validate_cert(self, cert_str, root_cert_string, root_key_string):
        raise Exception('validate_cert function must be implemented')
        # Expects to return True/False
class CertHandler(object):
    """Manages the signing certificate/key, optionally generating and
    validating a fresh certificate per call (only for pem cert+key)."""
    def __init__(
            self,
            security_context,
            cert_file=None, cert_type='pem',
            key_file=None, key_type='pem',
            generate_cert_info=None,
            cert_handler_extra_class=None,
            tmp_cert_file=None,
            tmp_key_file=None,
            verify_cert=False):
        """
        Initiates the class for handling certificates. Enables the certificates
        to either be a single certificate as base functionality or makes it
        possible to generate a new certificate for each call to the function.

        :param security_context: object whose key/cert file attributes are
            repointed when a new certificate is installed (see update_cert)
        :param cert_file: path to the base certificate
        :param cert_type: certificate file type; handling only activates
            when both cert_type and key_type are 'pem'
        :param key_file: path to the base private key
        :param key_type: key file type
        :param generate_cert_info: when set (plus both tmp files), enables
            per-call certificate generation
        :param cert_handler_extra_class: optional CertHandlerExtra plug-in
        :param tmp_cert_file: file generated certificates are written to
        :param tmp_key_file: file generated keys are written to
        :param verify_cert: whether verify_cert() actually verifies
        """
        self._verify_cert = False
        self._generate_cert = False
        # This cert do not have to be valid, it is just the last cert to be
        # validated.
        self._last_cert_verified = None
        self._last_validated_cert = None
        if cert_type == 'pem' and key_type == 'pem':
            self._verify_cert = verify_cert is True
            self._security_context = security_context
            self._osw = OpenSSLWrapper()
            if key_file and os.path.isfile(key_file):
                self._key_str = self._osw.read_str_from_file(key_file, key_type)
            else:
                self._key_str = ''
            if cert_file and os.path.isfile(cert_file):
                self._cert_str = self._osw.read_str_from_file(cert_file,
                                                              cert_type)
            else:
                self._cert_str = ''
            self._tmp_cert_str = self._cert_str
            self._tmp_key_str = self._key_str
            self._tmp_cert_file = tmp_cert_file
            self._tmp_key_file = tmp_key_file
            self._cert_info = None
            self._generate_cert_func_active = False
            # per-call generation needs a base cert+key and both tmp paths
            if generate_cert_info is not None \
                    and len(self._cert_str) > 0 \
                    and len(self._key_str) > 0 \
                    and tmp_key_file is not None \
                    and tmp_cert_file is not None:
                self._generate_cert = True
                self._cert_info = generate_cert_info
                self._cert_handler_extra_class = cert_handler_extra_class
    def verify_cert(self, cert_file):
        """Validate *cert_file* against the base certificate (or via the
        extra class); returns True when verification is disabled."""
        if self._verify_cert:
            if cert_file and os.path.isfile(cert_file):
                cert_str = self._osw.read_str_from_file(cert_file, 'pem')
            else:
                return False
            self._last_validated_cert = cert_str
            if self._cert_handler_extra_class is not None and \
                    self._cert_handler_extra_class.use_validate_cert_func():
                # NOTE(review): the extra class' validate_cert result is
                # ignored and True is returned below — confirm intended.
                self._cert_handler_extra_class.validate_cert(
                    cert_str, self._cert_str, self._key_str)
            else:
                valid, mess = self._osw.verify(self._cert_str, cert_str)
                logger.info('CertHandler.verify_cert: %s', mess)
                return valid
        return True
    def generate_cert(self):
        """Return True when per-call certificate generation is enabled."""
        return self._generate_cert
    def update_cert(self, active=False, client_crt=None):
        """Install a new certificate (generated, or the given *client_crt*)
        and repoint the security context's key/cert files at the tmp files."""
        if (self._generate_cert and active) or client_crt is not None:
            if client_crt is not None:
                self._tmp_cert_str = client_crt
                # No private key for signing
                self._tmp_key_str = ''
            elif self._cert_handler_extra_class is not None and \
                    self._cert_handler_extra_class.use_generate_cert_func():
                (self._tmp_cert_str, self._tmp_key_str) = \
                    self._cert_handler_extra_class.generate_cert(
                        self._cert_info, self._cert_str, self._key_str)
            else:
                self._tmp_cert_str, self._tmp_key_str = self._osw \
                    .create_certificate(self._cert_info, request=True)
                self._tmp_cert_str = self._osw.create_cert_signed_certificate(
                    self._cert_str, self._key_str, self._tmp_cert_str)
                # NOTE(review): valid/mess are computed but never checked —
                # confirm this verification is intentionally advisory.
                valid, mess = self._osw.verify(self._cert_str,
                                               self._tmp_cert_str)
            self._osw.write_str_to_file(self._tmp_cert_file, self._tmp_cert_str)
            self._osw.write_str_to_file(self._tmp_key_file, self._tmp_key_str)
            self._security_context.key_file = self._tmp_key_file
            self._security_context.cert_file = self._tmp_cert_file
            self._security_context.key_type = 'pem'
            self._security_context.cert_type = 'pem'
            self._security_context.my_cert = read_cert_from_file(
                self._security_context.cert_file,
                self._security_context.cert_type)
# How to get an RSA public key fingerprint from a certificate:
# openssl x509 -inform pem -noout -in server.crt -pubkey > publickey.pem
# openssl rsa -inform pem -noout -in publickey.pem -pubin -modulus
class SecurityContext(object):
my_cert = None
    def __init__(
            self,
            crypto,
            key_file='', key_type='pem',
            cert_file='', cert_type='pem',
            metadata=None,
            template='',
            encrypt_key_type='des-192',
            only_use_keys_in_metadata=False,
            cert_handler_extra_class=None,
            generate_cert_info=None,
            tmp_cert_file=None, tmp_key_file=None,
            validate_certificate=None,
            enc_key_files=None, enc_key_type='pem',
            encryption_keypairs=None,
            enc_cert_type='pem',
            sec_backend=None,
            delete_tmpfiles=True):
        """Collect all signing/encryption settings into one context.

        :param crypto: CryptoBackend doing the XML-level work (required)
        :param sec_backend: optional RSACrypto for non-XML signatures
        :param template: xmlsec encryption template file; defaults to the
            packaged template_enc.xml
        :raises ValueError: on wrongly typed crypto or sec_backend
        """
        if not isinstance(crypto, CryptoBackend):
            raise ValueError("crypto should be of type CryptoBackend")
        self.crypto = crypto

        if sec_backend and not isinstance(sec_backend, RSACrypto):
            raise ValueError("sec_backend should be of type RSACrypto")
        self.sec_backend = sec_backend

        # Your private key for signing
        self.key_file = key_file
        self.key_type = key_type

        # Your public key for signing
        self.cert_file = cert_file
        self.cert_type = cert_type

        # Your private key for encryption
        self.enc_key_files = enc_key_files
        self.enc_key_type = enc_key_type

        # Your public key for encryption
        self.encryption_keypairs = encryption_keypairs
        self.enc_cert_type = enc_cert_type

        self.my_cert = read_cert_from_file(cert_file, cert_type)

        self.cert_handler = CertHandler(
            self,
            cert_file, cert_type,
            key_file, key_type,
            generate_cert_info,
            cert_handler_extra_class,
            tmp_cert_file,
            tmp_key_file,
            validate_certificate)

        self.cert_handler.update_cert(True)

        self.metadata = metadata
        self.only_use_keys_in_metadata = only_use_keys_in_metadata

        # default to the packaged xmlsec encryption template
        if not template:
            with _resource_path(_data_template, "template_enc.xml") as fp:
                self.template = str(fp)
        else:
            self.template = template

        self.encrypt_key_type = encrypt_key_type
        self.delete_tmpfiles = delete_tmpfiles
    def correctly_signed(self, xml, must=False):
        """Verify the signature on *xml*; delegates to correctly_signed_response."""
        logger.debug('verify correct signature')
        return self.correctly_signed_response(xml, must)
def encrypt(self, text, recv_key='', template='', key_type=''):
"""
xmlsec encrypt --pubkey-pem pub-userkey.pem
--session-key aes128-cbc --xml-data doc-plain.xml
--output doc-encrypted.xml session-key-template.xml
:param text: Text to encrypt
:param recv_key: A file containing the receivers public key
:param template: A file containing the XMLSEC template
:param key_type: The type of session key to use
:result: An encrypted XML text
"""
if not key_type:
key_type = self.encrypt_key_type
if not template:
template = self.template
return self.crypto.encrypt(text, recv_key, template, key_type)
def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None):
"""
Will encrypt an assertion
:param statement: A XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text
"""
return self.crypto.encrypt_assertion(
statement, enc_key, template, key_type, node_xpath)
def decrypt_keys(self, enctext, keys=None):
""" Decrypting an encrypted text by the use of a private key.
:param enctext: The encrypted text as a string
:param keys: Keys to try to decrypt enctext with
:return: The decrypted text
"""
key_files = []
if not isinstance(keys, list):
keys = [keys]
keys_filtered = (key for key in keys if key)
keys_encoded = (
key.encode("ascii") if not isinstance(key, six.binary_type) else key
for key in keys_filtered
)
key_files = list(
make_temp(key, decode=False, delete_tmpfiles=self.delete_tmpfiles)
for key in keys_encoded
)
key_file_names = list(tmp.name for tmp in key_files)
try:
dectext = self.decrypt(enctext, key_file=key_file_names)
except DecryptError as e:
raise
else:
return dectext
def decrypt(self, enctext, key_file=None):
""" Decrypting an encrypted text by the use of a private key.
:param enctext: The encrypted text as a string
:return: The decrypted text
"""
if not isinstance(key_file, list):
key_file = [key_file]
key_files = [
key for key in itertools.chain(key_file, self.enc_key_files) if key
]
for key_file in key_files:
try:
dectext = self.crypto.decrypt(enctext, key_file)
except XmlsecError as e:
continue
else:
if dectext:
return dectext
errmsg = "No key was able to decrypt the ciphertext. Keys tried: {keys}"
errmsg = errmsg.format(keys=key_files)
raise DecryptError(errmsg)
def verify_signature(self, signedtext, cert_file=None, cert_type='pem', node_name=NODE_NAME, node_id=None):
""" Verifies the signature of a XML document.
:param signedtext: The XML document as a string
:param cert_file: The public key that was used to sign the document
:param cert_type: The file type of the certificate
:param node_name: The name of the class that is signed
:param node_id: The identifier of the node
:return: Boolean True if the signature was correct otherwise False.
"""
# This is only for testing purposes, otherwise when would you receive
# stuff that is signed with your key !?
if not cert_file:
cert_file = self.cert_file
cert_type = self.cert_type
return self.crypto.validate_signature(
signedtext,
cert_file=cert_file,
cert_type=cert_type,
node_name=node_name,
node_id=node_id,
)
    def _check_signature(self, decoded_xml, item, node_name=NODE_NAME, origdoc=None, must=False, only_valid_cert=False, issuer=None):
        """Verify the enveloped XML signature on *item*.

        Candidate certificates come from metadata for the issuer when
        available, falling back on certificates embedded in the document
        only when ``only_use_keys_in_metadata`` allows it.  Before any
        cryptographic verification, the signature's single Reference is
        validated against the SAML signature profile to defeat XML
        signature wrapping (XSW) attacks.

        :param decoded_xml: The signed XML document as a string
        :param item: The parsed instance (assertion/response/...) to check
        :param node_name: Name of the signed node/class
        :param origdoc: The original XML string (not used in this method)
        :param must: Not used in this method; kept for interface parity
        :param only_valid_cert: Accept the item when the signing cert
            validates even though signature verification did not succeed
        :param issuer: Fallback issuer element when *item* carries none
        :return: *item* when the signature checks out
        :raises MissingKey: when no candidate certificate can be found
        :raises SignatureError: on profile violations or failed verification
        :raises CertificateError: when the certificate fails validation
        """
        # Determine the issuer: prefer the item's own issuer element,
        # fall back on the explicitly supplied one.
        try:
            _issuer = item.issuer.text.strip()
        except AttributeError:
            _issuer = None
        if _issuer is None:
            try:
                _issuer = issuer.text.strip()
            except AttributeError:
                _issuer = None
        # More trust in certs from metadata then certs in the XML document
        if self.metadata:
            try:
                _certs = self.metadata.certs(_issuer, 'any', 'signing')
            except KeyError:
                _certs = []
            certs = []
            for cert in _certs:
                if isinstance(cert, six.string_types):
                    # Metadata returned the raw base64 body; wrap it into a
                    # PEM temp file for xmlsec.
                    content = pem_format(cert)
                    tmp = make_temp(content,
                                    suffix=".pem",
                                    decode=False,
                                    delete_tmpfiles=self.delete_tmpfiles)
                    certs.append(tmp)
                else:
                    certs.append(cert)
        else:
            certs = []
        if not certs and not self.only_use_keys_in_metadata:
            logger.debug('==== Certs from instance ====')
            certs = [
                make_temp(content=pem_format(cert),
                          suffix=".pem",
                          decode=False,
                          delete_tmpfiles=self.delete_tmpfiles)
                for cert in cert_from_instance(item)
            ]
        else:
            logger.debug('==== Certs from metadata ==== %s: %s ====', _issuer, certs)
        if not certs:
            raise MissingKey(_issuer)
        # saml-core section "5.4 XML Signature Profile" defines constrains on the
        # xmldsig-core facilities. It explicitly dictates that enveloped signatures
        # are the only signatures allowed. This means that:
        # * Assertion/RequestType/ResponseType elements must have an ID attribute
        # * signatures must have a single Reference element
        # * the Reference element must have a URI attribute
        # * the URI attribute contains an anchor
        # * the anchor points to the enclosing element's ID attribute
        references = item.signature.signed_info.reference
        signatures_must_have_a_single_reference_element = len(references) == 1
        the_Reference_element_must_have_a_URI_attribute = (
            signatures_must_have_a_single_reference_element
            and hasattr(references[0], "uri")
        )
        the_URI_attribute_contains_an_anchor = (
            the_Reference_element_must_have_a_URI_attribute
            and references[0].uri.startswith("#")
            and len(references[0].uri) > 1
        )
        the_anchor_points_to_the_enclosing_element_ID_attribute = (
            the_URI_attribute_contains_an_anchor
            and references[0].uri == "#{id}".format(id=item.id)
        )
        validators = {
            "signatures must have a single reference element": (
                signatures_must_have_a_single_reference_element
            ),
            "the Reference element must have a URI attribute": (
                the_Reference_element_must_have_a_URI_attribute
            ),
            "the URI attribute contains an anchor": (
                the_URI_attribute_contains_an_anchor
            ),
            "the anchor points to the enclosing element ID attribute": (
                the_anchor_points_to_the_enclosing_element_ID_attribute
            ),
        }
        if not all(validators.values()):
            error_context = {
                "message": "Signature failed to meet constraints on xmldsig",
                "validators": validators,
                "item ID": item.id,
                "reference URI": item.signature.signed_info.reference[0].uri,
                "issuer": _issuer,
                "node name": node_name,
                "xml document": decoded_xml,
            }
            raise SignatureError(error_context)
        # Try each candidate certificate until one verifies the signature.
        verified = False
        last_pem_file = None
        for pem_fd in certs:
            try:
                last_pem_file = pem_fd.name
                if self.verify_signature(
                    decoded_xml,
                    pem_fd.name,
                    node_name=node_name,
                    node_id=item.id,
                ):
                    verified = True
                    break
            except XmlsecError as exc:
                logger.error('check_sig: %s', exc)
                pass
            except Exception as exc:
                logger.error('check_sig: %s', exc)
                raise
        if verified or only_valid_cert:
            if not self.cert_handler.verify_cert(last_pem_file):
                raise CertificateError('Invalid certificate!')
        else:
            raise SignatureError('Failed to verify signature')
        return item
    def check_signature(self, item, node_name=NODE_NAME, origdoc=None, must=False, issuer=None):
        """
        :param item: Parsed entity
        :param node_name: The name of the node/class/element that is signed
        :param origdoc: The original XML string
        :param must: Whether a signature is required
        :param issuer: Fallback issuer element
        :return: The item, when its signature verifies
        """
        # NOTE(review): origdoc is passed both as the document to verify
        # (first argument) and as the origdoc parameter -- confirm callers
        # always hand in the original signed document here.
        return self._check_signature(
            origdoc,
            item,
            node_name,
            origdoc,
            must=must,
            issuer=issuer,
        )
    def correctly_signed_message(self, decoded_xml, msgtype, must=False, origdoc=None, only_valid_cert=False):
        """Check if a request is correctly signed, if we have metadata for
        the entity that sent the info use that, if not use the key that are in
        the message if any.

        :param decoded_xml: The SAML message as an XML infoset (a string)
        :param msgtype: SAML protocol message type
        :param must: Whether there must be a signature
        :param origdoc: The original XML string
        :param only_valid_cert: Accept a validating signing cert even when
            the signature could not be verified
        :return: The parsed message instance
        :raises TypeError: when the text does not parse as *msgtype*
        :raises SignatureError: when a required signature is missing/bad
        """
        # Resolve the parser function: look in the assertion namespace
        # first, then fall back on the protocol namespace.
        attr = '{type}_from_string'.format(type=msgtype)
        _func = getattr(saml, attr, None)
        _func = getattr(samlp, attr, _func)
        msg = _func(decoded_xml)
        if not msg:
            raise TypeError('Not a {type}'.format(type=msgtype))
        if not msg.signature:
            if must:
                err_msg = 'Required signature missing on {type}'
                err_msg = err_msg.format(type=msgtype)
                raise SignatureError(err_msg)
            else:
                return msg
        return self._check_signature(
            decoded_xml,
            msg,
            class_name(msg),
            origdoc,
            must=must,
            only_valid_cert=only_valid_cert)
    # Convenience wrappers: one correctly_signed_* method per SAML message
    # type, all delegating to correctly_signed_message() with the matching
    # msgtype string.
    def correctly_signed_authn_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authn_request', must, origdoc, only_valid_cert=only_valid_cert)
    def correctly_signed_authn_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authn_query', must, origdoc, only_valid_cert)
    def correctly_signed_logout_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'logout_request', must, origdoc, only_valid_cert)
    def correctly_signed_logout_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'logout_response', must, origdoc, only_valid_cert)
    def correctly_signed_attribute_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'attribute_query', must, origdoc, only_valid_cert)
    def correctly_signed_authz_decision_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authz_decision_query', must, origdoc, only_valid_cert)
    def correctly_signed_authz_decision_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authz_decision_response', must, origdoc, only_valid_cert)
    def correctly_signed_name_id_mapping_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'name_id_mapping_request', must, origdoc, only_valid_cert)
    def correctly_signed_name_id_mapping_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'name_id_mapping_response', must, origdoc, only_valid_cert)
    def correctly_signed_artifact_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'artifact_request', must, origdoc, only_valid_cert)
    def correctly_signed_artifact_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'artifact_response', must, origdoc, only_valid_cert)
    def correctly_signed_manage_name_id_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'manage_name_id_request', must, origdoc, only_valid_cert)
    def correctly_signed_manage_name_id_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'manage_name_id_response', must, origdoc, only_valid_cert)
    def correctly_signed_assertion_id_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'assertion_id_request', must, origdoc, only_valid_cert)
    # Note: an assertion-id *response* is an assertion, hence 'assertion'.
    def correctly_signed_assertion_id_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'assertion', must, origdoc, only_valid_cert)
    def correctly_signed_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, require_response_signature=False, **kwargs):
        """ Check if a instance is correctly signed, if we have metadata for
        the IdP that sent the info use that, if not use the key that are in
        the message if any.

        :param decoded_xml: The SAML message as a XML string
        :param must: Whether there must be a signature
        :param origdoc: The original XML string
        :param only_valid_cert: Accepted for interface parity; not used here
        :param require_response_signature: Raise when the response element
            carries no signature of its own
        :return: None if the signature can not be verified otherwise an instance
        """
        response = samlp.any_response_from_string(decoded_xml)
        if not response:
            raise TypeError('Not a Response')
        if response.signature:
            # 'do_not_verify' is an escape hatch for callers that have
            # already verified, or deliberately skip, the signature.
            if 'do_not_verify' in kwargs:
                pass
            else:
                self._check_signature(decoded_xml, response,
                                      class_name(response), origdoc)
        elif require_response_signature:
            raise SignatureError('Signature missing for response')
        return response
    def sign_statement_using_xmlsec(self, statement, **kwargs):
        """ Deprecated function. See sign_statement(). """
        # Kept for backwards compatibility with older pysaml2 releases.
        return self.sign_statement(statement, **kwargs)
def sign_statement(self, statement, node_name, key=None, key_file=None, node_id=None):
"""Sign a SAML statement.
:param statement: The statement to be signed
:param node_name: string like 'urn:oasis:names:...:Assertion'
:param key: The key to be used for the signing, either this or
:param key_file: The file where the key can be found
:param node_id:
:return: The signed statement
"""
if not key_file and key:
content = str(key).encode()
tmp = make_temp(content, suffix=".pem", delete_tmpfiles=self.delete_tmpfiles)
key_file = tmp.name
if not key and not key_file:
key_file = self.key_file
return self.crypto.sign_statement(
statement,
node_name,
key_file,
node_id,
)
def sign_assertion(self, statement, **kwargs):
"""Sign a SAML assertion.
See sign_statement() for the kwargs.
:param statement: The statement to be signed
:return: The signed statement
"""
return self.sign_statement(
statement, class_name(saml.Assertion()), **kwargs)
    def sign_attribute_query_using_xmlsec(self, statement, **kwargs):
        """ Deprecated function. See sign_attribute_query(). """
        # Kept for backwards compatibility with older pysaml2 releases.
        return self.sign_attribute_query(statement, **kwargs)
def sign_attribute_query(self, statement, **kwargs):
"""Sign a SAML attribute query.
See sign_statement() for the kwargs.
:param statement: The statement to be signed
:return: The signed statement
"""
return self.sign_statement(
statement, class_name(samlp.AttributeQuery()), **kwargs)
def multiple_signatures(self, statement, to_sign, key=None, key_file=None, sign_alg=None, digest_alg=None):
"""
Sign multiple parts of a statement
:param statement: The statement that should be sign, this is XML text
:param to_sign: A list of (items, id) tuples that specifies what to sign
:param key: A key that should be used for doing the signing
:param key_file: A file that contains the key to be used
:return: A possibly multiple signed statement
"""
for (item, sid) in to_sign:
if not sid:
if not item.id:
sid = item.id = sid()
else:
sid = item.id
if not item.signature:
item.signature = pre_signature_part(
ident=sid,
public_key=self.cert_file,
sign_alg=sign_alg,
digest_alg=digest_alg,
)
statement = self.sign_statement(
statement,
class_name(item),
key=key,
key_file=key_file,
node_id=sid,
)
return statement
# XXX FIXME calls DefaultSignature - remove to unveil chain of calls without proper args
def pre_signature_part(
        ident,
        public_key=None,
        identifier=None,
        digest_alg=None,
        sign_alg=None,
):
    """Return a preset <ds:Signature> part for an assertion to be signed.

    If an assertion is to be signed the signature part has to be preset
    with which algorithms to be used; this function builds that template.

    :param ident: The identifier of the assertion, so you know which
        assertion was signed
    :param public_key: The base64 part of a PEM file
    :param identifier: Optional counter used to give the Signature an id
    :return: A preset signature part
    """
    # XXX fall back on the configured default algorithms when none given
    digest_alg = digest_alg or ds.DefaultSignature().get_digest_alg()
    sign_alg = sign_alg or ds.DefaultSignature().get_sign_alg()

    # Enveloped signature over an exclusive-c14n form of the document.
    transforms = ds.Transforms(
        transform=[
            ds.Transform(algorithm=ds.TRANSFORM_ENVELOPED),
            ds.Transform(algorithm=ds.ALG_EXC_C14N),
        ]
    )
    reference = ds.Reference(
        uri='#{id}'.format(id=ident),
        digest_value=ds.DigestValue(),
        transforms=transforms,
        digest_method=ds.DigestMethod(algorithm=digest_alg),
    )
    signed_info = ds.SignedInfo(
        signature_method=ds.SignatureMethod(algorithm=sign_alg),
        canonicalization_method=ds.CanonicalizationMethod(
            algorithm=ds.ALG_EXC_C14N),
        reference=reference,
    )
    signature = ds.Signature(
        signed_info=signed_info,
        signature_value=ds.SignatureValue(),
    )
    if identifier:
        signature.id = 'Signature{n}'.format(n=identifier)
    if public_key:
        signature.key_info = ds.KeyInfo(
            x509_data=ds.X509Data(
                x509_certificate=[ds.X509Certificate(text=public_key)],
            )
        )
    return signature
# <?xml version="1.0" encoding="UTF-8"?>
# <EncryptedData Id="ED" Type="http://www.w3.org/2001/04/xmlenc#Element"
# xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#tripledes-cbc"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <EncryptedKey Id="EK" xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#rsa-1_5"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <ds:KeyName>my-rsa-key</ds:KeyName>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# <ReferenceList>
# <DataReference URI="#ED"/>
# </ReferenceList>
# </EncryptedKey>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# </EncryptedData>
def pre_encryption_part(msg_enc=TRIPLE_DES_CBC, key_enc=RSA_1_5, key_name='my-rsa-key',
                        encrypted_key_id=None, encrypted_data_id=None):
    """Build the EncryptedData/EncryptedKey template used by xmlsec.

    :param msg_enc: Algorithm URI used to encrypt the payload
    :param key_enc: Algorithm URI used to encrypt the session key
    :param key_name: Key name advertised in the KeyInfo element
    :param encrypted_key_id: Explicit id for the EncryptedKey (random default)
    :param encrypted_data_id: Explicit id for the EncryptedData (random default)
    :return: An EncryptedData template instance
    """
    # NOTE(review): the defaults (3DES-CBC and RSA PKCS#1 v1.5) are weak
    # by modern standards; callers should pass stronger algorithms where
    # interoperability allows.
    ek_id = encrypted_key_id or "EK_{id}".format(id=gen_random_key())
    ed_id = encrypted_data_id or "ED_{id}".format(id=gen_random_key())

    encrypted_key = EncryptedKey(
        id=ek_id,
        encryption_method=EncryptionMethod(algorithm=key_enc),
        key_info=ds.KeyInfo(key_name=ds.KeyName(text=key_name)),
        cipher_data=CipherData(cipher_value=CipherValue(text='')),
    )
    encrypted_data = EncryptedData(
        id=ed_id,
        type='http://www.w3.org/2001/04/xmlenc#Element',
        encryption_method=EncryptionMethod(algorithm=msg_enc),
        key_info=ds.KeyInfo(encrypted_key=encrypted_key),
        cipher_data=CipherData(cipher_value=CipherValue(text='')),
    )
    return encrypted_data
def pre_encrypt_assertion(response):
    """
    Move the assertion to within an encrypted_assertion

    :param response: The response with one assertion
    :return: The response but now with the assertion within an
        encrypted_assertion.
    """
    assertion = response.assertion
    response.assertion = None
    response.encrypted_assertion = EncryptedAssertion()
    if assertion is None:
        return response
    # Single assertions and lists of assertions are both supported.
    if isinstance(assertion, list):
        response.encrypted_assertion.add_extension_elements(assertion)
    else:
        response.encrypted_assertion.add_extension_element(assertion)
    return response
if __name__ == '__main__':
    # Tiny CLI: currently only able to list the supported algorithms.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--list-sigalgs', dest='listsigalgs',
                        action='store_true',
                        help='List implemented signature algorithms')
    args = parser.parse_args()

    if args.listsigalgs:
        print('\n'.join(SIGNER_ALGS.keys()))
""" Functions connected to signing and verifying.
Based on the use of xmlsec1 binaries and not the python xmlsec module.
"""
import base64
import datetime
import hashlib
import itertools
import logging
import os
from subprocess import PIPE
from subprocess import Popen
from tempfile import NamedTemporaryFile
from time import mktime
from uuid import uuid4 as gen_random_key

import dateutil.parser
import pytz
import six
from importlib_resources import path as _resource_path
from OpenSSL import crypto
from six.moves.urllib import parse

import saml2.cryptography.asymmetric
import saml2.cryptography.pki
import saml2.data.templates as _data_template
import saml2.xmldsig as ds
from saml2 import ExtensionElement
from saml2 import SAMLError
from saml2 import SamlBase
from saml2 import VERSION
from saml2 import class_name
from saml2 import extension_elements_to_elements
from saml2 import saml
from saml2 import samlp
from saml2.cert import OpenSSLWrapper
from saml2.extension import pefim
from saml2.extension.pefim import SPCertEnc
from saml2.s_utils import Unsupported
from saml2.s_utils import sid
from saml2.saml import EncryptedAssertion
from saml2.time_util import instant
from saml2.time_util import str_to_time
from saml2.xmldsig import SIG_RSA_SHA1
from saml2.xmldsig import SIG_RSA_SHA224
from saml2.xmldsig import SIG_RSA_SHA256
from saml2.xmldsig import SIG_RSA_SHA384
from saml2.xmldsig import SIG_RSA_SHA512
from saml2.xmlenc import CipherData
from saml2.xmlenc import CipherValue
from saml2.xmlenc import EncryptedData
from saml2.xmlenc import EncryptedKey
from saml2.xmlenc import EncryptionMethod
logger = logging.getLogger(__name__)
SIG = '{{{ns}#}}{attribute}'.format(ns=ds.NAMESPACE, attribute='Signature')
RSA_1_5 = 'http://www.w3.org/2001/04/xmlenc#rsa-1_5'
TRIPLE_DES_CBC = 'http://www.w3.org/2001/04/xmlenc#tripledes-cbc'
class SigverError(SAMLError):
    """Base class for all errors raised by this module."""
    pass
class CertificateTooOld(SigverError):
    """The certificate's validity period has ended."""
    pass
class XmlsecError(SigverError):
    """The xmlsec1 binary reported a failure."""
    pass
class MissingKey(SigverError):
    """No usable key/certificate could be found for the issuer."""
    pass
class DecryptError(XmlsecError):
    """Decryption failed with every available key."""
    pass
class EncryptError(XmlsecError):
    """Encryption via xmlsec failed."""
    pass
class SignatureError(XmlsecError):
    """Signing failed, or a signature did not verify."""
    pass
class BadSignature(SigverError):
    """The signature is invalid."""
    pass
class CertificateError(SigverError):
    """The certificate itself failed validation."""
    pass
def read_file(*args, **kwargs):
    """Return the full contents of a file.

    All arguments are forwarded verbatim to the builtin open().
    """
    with open(*args, **kwargs) as fobj:
        data = fobj.read()
    return data
def rm_xmltag(statement):
    """Strip a leading XML declaration from *statement*.

    Handles the three declaration variants historically produced by this
    package, plus at most one newline directly after the declaration.
    Bytes input is decoded to text first.

    :param statement: XML document as text or bytes
    :return: The document without the leading XML declaration
    """
    XMLTAG = "<?xml version='1.0'?>"
    PREFIX1 = "<?xml version='1.0' encoding='UTF-8'?>"
    PREFIX2 = '<?xml version="1.0" encoding="UTF-8"?>'

    # bytes.startswith(str) raises TypeError -> decode to text.
    try:
        statement.startswith(XMLTAG)
    except TypeError:
        statement = statement.decode()

    for prefix in (XMLTAG, PREFIX1, PREFIX2):
        if statement.startswith(prefix):
            statement = statement[len(prefix):]
            # BUGFIX: the old code indexed statement[0] here and raised
            # IndexError when nothing followed the declaration.
            if statement[:1] == '\n':
                statement = statement[1:]
            break

    return statement
def signed(item):
    """
    Is any part of the document signed ?

    :param item: A Samlbase instance
    :return: True if some part of it is signed
    """
    # The element itself carries a signature.
    if SIG in item.c_children.keys() and item.signature:
        return True

    # Otherwise, recurse through all children in declaration order.
    for prop in item.c_child_order:
        child = getattr(item, prop, None)
        if isinstance(child, list):
            if any(signed(sub) for sub in child):
                return True
        elif child and signed(child):
            return True

    return False
def get_xmlsec_binary(paths=None):
    """
    Tries to find the xmlsec1 binary.

    :param paths: Non-system paths which should be searched when
        looking for xmlsec1
    :return: full name of the xmlsec1 binary found. If no binaries are
        found then an exception is raised.
    """
    # Candidate executable names differ per platform.
    if os.name == 'nt':
        bin_name = ['xmlsec.exe', 'xmlsec1.exe']
    else:  # posix and anything else
        bin_name = ['xmlsec1']

    def _exists(candidate):
        # lstat: we only care that the directory entry is present.
        try:
            return bool(os.lstat(candidate))
        except OSError:
            return False

    # Caller-supplied directories are searched first ...
    if paths:
        for bname in bin_name:
            for directory in paths:
                candidate = os.path.join(directory, bname)
                if _exists(candidate):
                    return candidate

    # ... then every entry on the system PATH.
    for directory in os.environ['PATH'].split(os.pathsep):
        for bname in bin_name:
            candidate = os.path.join(directory, bname)
            if _exists(candidate):
                return candidate

    raise SigverError('Cannot find {binary}'.format(binary=bin_name))
def _get_xmlsec_cryptobackend(path=None, search_paths=None, delete_tmpfiles=True):
    """
    Initialize a CryptoBackendXmlSec1 crypto backend.

    This function is now internal to this module.

    :param path: Explicit path to the xmlsec1 binary; located via
        get_xmlsec_binary() when None
    :param search_paths: Extra directories to search for the binary
    :param delete_tmpfiles: Whether temporary files are auto-deleted
    :return: A CryptoBackendXmlSec1 instance
    """
    if path is None:
        path = get_xmlsec_binary(paths=search_paths)
    return CryptoBackendXmlSec1(path, delete_tmpfiles=delete_tmpfiles)
NODE_NAME = 'urn:oasis:names:tc:SAML:2.0:assertion:Assertion'
ENC_NODE_NAME = 'urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAssertion'
ENC_KEY_CLASS = 'EncryptedKey'
def _make_vals(val, klass, seccont, klass_inst=None, prop=None, part=False,
               base64encode=False, elements_to_sign=None):
    """
    Creates a class instance with a specified value, the specified
    class instance may be a value on a property in a defined class instance.

    :param val: The value
    :param klass: The value class
    :param seccont: The security context, forwarded to _instance()
    :param klass_inst: The class instance which has a property on which
        what this function returns is a value.
    :param prop: The property which the value should be assigned to.
    :param part: If the value is one of a possible list of values it should be
        handled slightly different compared to if it isn't.
    :param base64encode: Whether text values should be base64 encoded
    :param elements_to_sign: Accumulator of (class name, id) pairs for
        elements carrying a 'signature' entry
    :return: Value class instance
    """
    cinst = None

    if isinstance(val, dict):
        cinst = _instance(klass, val, seccont, base64encode=base64encode,
                          elements_to_sign=elements_to_sign)
    else:
        try:
            cinst = klass().set_text(val)
        except ValueError:
            # Not plain text: treat val as a sequence and recurse per item.
            if not part:
                cis = [
                    _make_vals(
                        sval,
                        klass,
                        seccont,
                        klass_inst,
                        prop,
                        True,
                        base64encode,
                        elements_to_sign)
                    for sval in val
                ]
                setattr(klass_inst, prop, cis)
            else:
                raise

    if part:
        return cinst
    else:
        # Top-level invocation: attach the (single) instance as a list.
        if cinst:
            cis = [cinst]
            setattr(klass_inst, prop, cis)
def _instance(klass, ava, seccont, base64encode=False, elements_to_sign=None):
    """Instantiate *klass* from the attribute/value dictionary *ava*.

    Attributes, text content, children (single values and lists) and
    extension elements/attributes are populated recursively.  When *ava*
    contains a 'signature' entry the new instance is queued on
    *elements_to_sign*.

    :param klass: The class to instantiate
    :param ava: Dict describing attributes, children and text
    :param seccont: Security context forwarded to nested conversions
    :param base64encode: Whether text values should be base64 encoded
    :param elements_to_sign: Accumulator of (class name, id) pairs
    :return: The populated class instance
    """
    instance = klass()
    # Plain XML attributes; booleans/ints are stringified first.
    for prop in instance.c_attributes.values():
        if prop in ava:
            if isinstance(ava[prop], bool):
                setattr(instance, prop, str(ava[prop]).encode())
            elif isinstance(ava[prop], int):
                setattr(instance, prop, str(ava[prop]))
            else:
                setattr(instance, prop, ava[prop])
    if 'text' in ava:
        instance.set_text(ava['text'], base64encode)
    for prop, klassdef in instance.c_children.values():
        if prop in ava:
            if isinstance(klassdef, list):
                # means there can be a list of values
                _make_vals(ava[prop], klassdef[0], seccont, instance, prop,
                           base64encode=base64encode,
                           elements_to_sign=elements_to_sign)
            else:
                cis = _make_vals(ava[prop], klassdef, seccont, instance, prop,
                                 True, base64encode, elements_to_sign)
                setattr(instance, prop, cis)
    if 'extension_elements' in ava:
        for item in ava['extension_elements']:
            instance.extension_elements.append(
                ExtensionElement(item['tag']).loadd(item))
    if 'extension_attributes' in ava:
        for key, val in ava['extension_attributes'].items():
            instance.extension_attributes[key] = val
    if 'signature' in ava:
        elements_to_sign.append((class_name(instance), instance.id))
    return instance
# XXX will actually sign the nodes
# XXX assumes pre_signature_part has already been called
# XXX calls sign without specifying sign_alg/digest_alg
# XXX this is fine as the algs are embeded in the document
# XXX as setup by pre_signature_part
# XXX !!expects instance string!!
def signed_instance_factory(instance, seccont, elements_to_sign=None):
    """
    :param instance: The instance to be signed or not
    :param seccont: The security context
    :param elements_to_sign: Which parts if any that should be signed
    :return: A class instance if not signed otherwise a string
    """
    if not elements_to_sign:
        return instance

    # Serialize once, then apply every requested signature in turn.
    signed_xml = (
        instance
        if isinstance(instance, six.string_types)
        else instance.to_string()
    )
    for node_name, nodeid in elements_to_sign:
        signed_xml = seccont.sign_statement(
            signed_xml, node_name=node_name, node_id=nodeid
        )
    return signed_xml
def make_temp(content, suffix="", decode=True, delete_tmpfiles=True):
    """Write *content* to a named temporary file and return the file object.

    xmlsec only accepts files, so in-memory keys and certificates must be
    spilled to disk before use.

    :param content: The information to be placed in the file
    :param suffix: Suffix the temporary file should carry, if any
    :param decode: Whether *content* is base64 and must be decoded first
    :param delete_tmpfiles: Whether the file is deleted automatically when
        it is no longer in use
    :return: An open NamedTemporaryFile rewound to offset 0; use its
        ``.name`` attribute for the path to hand to xmlsec.
    """
    if isinstance(content, six.binary_type):
        payload = content
    else:
        payload = content.encode("utf-8")
    if decode:
        payload = base64.b64decode(payload)

    ntf = NamedTemporaryFile(suffix=suffix, delete=delete_tmpfiles)
    ntf.write(payload)
    ntf.seek(0)
    return ntf
def split_len(seq, length):
    """Split *seq* into consecutive chunks of at most *length* items."""
    return [seq[pos:pos + length] for pos in range(0, len(seq), length)]
M2_TIME_FORMAT = '%b %d %H:%M:%S %Y'
def to_time(_time):
    """Convert an OpenSSL/M2-style 'GMT' timestamp to epoch seconds.

    :param _time: A string like 'Apr  4 12:00:00 2030 GMT'
    :return: Seconds since the epoch
    :raises ValueError: if the string does not end with ' GMT'
    """
    if not _time.endswith(' GMT'):
        raise ValueError('Time does not end with GMT')
    return mktime(str_to_time(_time[:-4], M2_TIME_FORMAT))
def active_cert(key):
    """
    Verifies that a key is active that is present time is after not_before
    and before not_after.

    :param key: The Key (base64 certificate body)
    :return: True if the key is active else False
    """
    try:
        cert_str = pem_format(key)
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
    except AttributeError:
        return False
    # NOTE(review): this relies on `datetime` and `dateutil.parser` being
    # imported at module level -- confirm those imports exist, otherwise
    # this raises NameError at runtime.
    now = pytz.UTC.localize(datetime.datetime.utcnow())
    valid_from = dateutil.parser.parse(cert.get_notBefore())
    valid_to = dateutil.parser.parse(cert.get_notAfter())
    active = not cert.has_expired() and valid_from <= now < valid_to
    return active
def cert_from_key_info(key_info, ignore_age=False):
    """ Get all X509 certs from a KeyInfo instance. Care is taken to make
    sure that the certs are continuous sequences of bytes.

    All certificates appearing in an X509Data element MUST relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.

    :param key_info: The KeyInfo instance
    :param ignore_age: Accept certificates regardless of validity period
    :return: A possibly empty list of certs
    """
    res = []
    for x509_data in key_info.x509_data:
        raw = x509_data.x509_certificate.text.strip()
        # Remove all whitespace, then re-wrap the base64 body at 64 cols.
        cert = '\n'.join(split_len(''.join(part.strip() for part in raw.split()), 64))
        if ignore_age or active_cert(cert):
            res.append(cert)
        else:
            logger.info('Inactive cert')
    return res
def cert_from_key_info_dict(key_info, ignore_age=False):
    """ Get all X509 certs from a KeyInfo dictionary. Care is taken to make
    sure that the certs are continuous sequences of bytes.

    All certificates appearing in an X509Data element MUST relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.

    :param key_info: The KeyInfo dictionary
    :param ignore_age: Accept certificates regardless of validity period
    :return: A possibly empty list of certs in their text representation
    """
    res = []
    for entry in key_info.get('x509_data', []):
        raw = entry['x509_certificate']['text'].strip()
        # Remove all whitespace, then re-wrap the base64 body at 64 cols.
        cert = '\n'.join(split_len(''.join(part.strip() for part in raw.split()), 64))
        if ignore_age or active_cert(cert):
            res.append(cert)
        else:
            logger.info('Inactive cert')
    return res
def cert_from_instance(instance):
    """ Find certificates that are part of an instance

    :param instance: An instance
    :return: possible empty list of certificates
    """
    signature = instance.signature
    if signature and signature.key_info:
        # Certificate age is ignored here; validity is checked elsewhere.
        return cert_from_key_info(signature.key_info, ignore_age=True)
    return []
def extract_rsa_key_from_x509_cert(pem):
    """Return the public key embedded in a PEM-encoded X.509 certificate.

    :param pem: The certificate as PEM bytes
    :return: The certificate's public key object
    """
    cert = saml2.cryptography.pki.load_pem_x509_certificate(pem)
    return cert.public_key()
def pem_format(key):
    """Wrap a base64 certificate body in PEM BEGIN/END markers.

    :param key: The base64 body of the certificate (text)
    :return: The PEM document as ASCII bytes
    """
    parts = [
        '-----BEGIN CERTIFICATE-----',
        key,
        '-----END CERTIFICATE-----',
    ]
    return os.linesep.join(parts).encode('ascii')
def import_rsa_key_from_file(filename):
    """Load an unencrypted PEM private key from *filename*.

    :param filename: Path to the PEM file
    :return: The loaded private key object
    """
    data = read_file(filename, 'rb')
    key = saml2.cryptography.asymmetric.load_pem_private_key(data, None)
    return key
def parse_xmlsec_output(output):
    """Decide whether an xmlsec run succeeded from its textual output.

    :param output: The output from Popen
    :return: True when a line reading 'OK' is found
    :raises XmlsecError: when a 'FAIL' line is found, or neither marker
        appears at all
    """
    for line in output.splitlines():
        if line == 'OK':
            return True
        if line == 'FAIL':
            break
    raise XmlsecError(output)
def sha1_digest(msg):
    """Return the raw SHA-1 digest of *msg* (bytes)."""
    return hashlib.new('sha1', msg).digest()
class Signer(object):
    """Abstract base class for signing algorithms."""
    def __init__(self, key):
        # Default key; per-call keys may override it in sign()/verify().
        self.key = key
    def sign(self, msg, key):
        """Sign ``msg`` with ``key`` and return the signature."""
        raise NotImplementedError
    def verify(self, msg, sig, key):
        """Return True if ``sig`` is a valid signature for ``msg``."""
        raise NotImplementedError
class RSASigner(Signer):
    """RSA signer/verifier bound to a fixed digest algorithm."""
    def __init__(self, digest, key=None):
        Signer.__init__(self, key)
        # Hash algorithm instance (e.g. SHA256()) used for both operations.
        self.digest = digest
    def sign(self, msg, key=None):
        """Sign *msg* with *key* (or the instance default key)."""
        return saml2.cryptography.asymmetric.key_sign(
            key or self.key, msg, self.digest)
    def verify(self, msg, sig, key=None):
        """Return True when *sig* is a valid signature of *msg*."""
        return saml2.cryptography.asymmetric.key_verify(
            key or self.key, sig, msg, self.digest)
# Map from xmldsig signature-algorithm URI to a ready-made RSASigner.
SIGNER_ALGS = {
    SIG_RSA_SHA1: RSASigner(saml2.cryptography.asymmetric.hashes.SHA1()),
    SIG_RSA_SHA224: RSASigner(saml2.cryptography.asymmetric.hashes.SHA224()),
    SIG_RSA_SHA256: RSASigner(saml2.cryptography.asymmetric.hashes.SHA256()),
    SIG_RSA_SHA384: RSASigner(saml2.cryptography.asymmetric.hashes.SHA384()),
    SIG_RSA_SHA512: RSASigner(saml2.cryptography.asymmetric.hashes.SHA512()),
}
# Parameter order used when rebuilding the signed query string for the
# HTTP-Redirect binding (requests and responses respectively).
REQ_ORDER = [
    'SAMLRequest',
    'RelayState',
    'SigAlg',
]
RESP_ORDER = [
    'SAMLResponse',
    'RelayState',
    'SigAlg',
]
class RSACrypto(object):
    """Pairs a default RSA key with signer lookup by algorithm URI."""

    def __init__(self, key):
        # Default key used when get_signer() is not handed one explicitly.
        self.key = key

    def get_signer(self, sigalg, sigkey=None):
        """Return a primed Signer for *sigalg*, or None if unsupported."""
        signer = SIGNER_ALGS.get(sigalg)
        if signer is None:
            return None
        signer.key = sigkey if sigkey else self.key
        return signer
def verify_redirect_signature(saml_msg, crypto, cert=None, sigkey=None):
    """Verify the signature on a SAML HTTP-Redirect binding message.

    :param saml_msg: A dictionary with strings as values, *NOT* lists as
        produced by parse_qs.
    :param crypto: Object whose get_signer() yields a signer for a SigAlg URI
    :param cert: A certificate to use when verifying the signature
    :param sigkey: Explicit key to verify with when no cert is given
    :return: True, if signature verified
    :raises Unsupported: if the signature algorithm is missing/unknown, or
        the message is neither a SAMLRequest nor a SAMLResponse
    """
    # Fix: get_signer() swallows KeyError and returns None for an unknown
    # algorithm, so the former `except KeyError` around it could never fire
    # for that case (and re-indexing saml_msg['SigAlg'] in the handler
    # re-raised KeyError when the parameter was absent altogether).
    sigalg = saml_msg.get('SigAlg')
    signer = crypto.get_signer(sigalg, sigkey)
    if signer is None:
        raise Unsupported('Signature algorithm: {alg}'.format(alg=sigalg))

    if 'SAMLRequest' in saml_msg:
        _order = REQ_ORDER
    elif 'SAMLResponse' in saml_msg:
        _order = RESP_ORDER
    else:
        raise Unsupported(
            'Verifying signature on something that should not be signed'
        )

    _args = saml_msg.copy()
    del _args['Signature']  # everything but the signature
    # Re-create the exact byte string that was signed: the re-encoded
    # parameters joined in protocol order.
    string = '&'.join(
        parse.urlencode({k: _args[k]})
        for k in _order
        if k in _args
    ).encode('ascii')

    if cert:
        _key = extract_rsa_key_from_x509_cert(pem_format(cert))
    else:
        _key = sigkey
    _sign = base64.b64decode(saml_msg['Signature'])

    return bool(signer.verify(string, _sign, _key))
def make_str(txt):
    """Return *txt* as text, decoding it first when it is a byte string."""
    if isinstance(txt, six.string_types):
        return txt
    return txt.decode()
def read_cert_from_file(cert_file, cert_type):
    """ Reads a certificate from a file. The assumption is that there is
    only one certificate in the file

    :param cert_file: The name of the file
    :param cert_type: The certificate type ('pem', 'der', 'cer' or 'crt')
    :return: A base64 encoded certificate as a string or the empty string
    :raises CertificateError: if a PEM file lacks BEGIN/END markers
    """
    if not cert_file:
        return ''

    if cert_type == 'pem':
        _a = read_file(cert_file, 'rb').decode()
        # Normalize CRLF line endings before splitting.
        _b = _a.replace('\r\n', '\n')
        lines = _b.split('\n')

        # Drop everything up to and including the BEGIN marker.
        for pattern in (
                '-----BEGIN CERTIFICATE-----',
                '-----BEGIN PUBLIC KEY-----'):
            if pattern in lines:
                lines = lines[lines.index(pattern) + 1:]
                break
        else:
            raise CertificateError('Strange beginning of PEM file')

        # Drop the END marker and everything after it.
        for pattern in (
                '-----END CERTIFICATE-----',
                '-----END PUBLIC KEY-----'):
            if pattern in lines:
                lines = lines[:lines.index(pattern)]
                break
        else:
            raise CertificateError('Strange end of PEM file')
        # What remains is the bare base64 payload.
        return make_str(''.join(lines).encode())

    if cert_type in ['der', 'cer', 'crt']:
        data = read_file(cert_file, 'rb')
        _cert = base64.b64encode(data)
        return make_str(_cert)
    # NOTE(review): an unrecognized cert_type falls off the end and returns
    # None implicitly, not the documented empty string — confirm callers cope.
class CryptoBackend(object):
    """Interface for XML signing/encryption backends.

    Concrete backends (xmlsec1 subprocess, pyXMLSecurity, ...) override
    every method below; the base implementations only raise.
    """

    def version(self):
        raise NotImplementedError()

    def encrypt(self, text, recv_key, template, key_type):
        raise NotImplementedError()

    def encrypt_assertion(self, statement, enc_key, template, key_type, node_xpath):
        raise NotImplementedError()

    def decrypt(self, enctext, key_file):
        raise NotImplementedError()

    def sign_statement(self, statement, node_name, key_file, node_id):
        raise NotImplementedError()

    def validate_signature(self, enctext, cert_file, cert_type, node_name, node_id):
        raise NotImplementedError()
# XPath selecting the Assertion wrapped in Response/EncryptedAssertion,
# using local-name() so it matches regardless of namespace prefixes.
ASSERT_XPATH = ''.join(
    "/*[local-name()='{name}']".format(name=name)
    for name in ('Response', 'EncryptedAssertion', 'Assertion')
)
class CryptoBackendXmlSec1(CryptoBackend):
    """
    CryptoBackend implementation using external binary 1 to sign
    and verify XML documents.

    All operations shell out to the configured xmlsec1 binary, passing
    inputs via temporary files and reading results from an --output file.
    """

    __DEBUG = 0

    def __init__(self, xmlsec_binary, delete_tmpfiles=True, **kwargs):
        CryptoBackend.__init__(self, **kwargs)
        if not isinstance(xmlsec_binary, six.string_types):
            raise ValueError("xmlsec_binary should be of type string")
        # Full path to the xmlsec1 executable.
        self.xmlsec = xmlsec_binary
        # Whether temporary files handed to xmlsec1 are removed afterwards.
        self.delete_tmpfiles = delete_tmpfiles
        try:
            # Optional backend for non-XML (redirect binding) signatures.
            self.non_xml_crypto = RSACrypto(kwargs['rsa_key'])
        except KeyError:
            pass

    def version(self):
        """Return the xmlsec1 version string, or '' if it cannot be parsed."""
        com_list = [self.xmlsec, '--version']
        pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
        content, _ = pof.communicate()
        content = content.decode('ascii')
        try:
            # Expected output shape: 'xmlsec1 <version>'.
            return content.split(' ')[1]
        except IndexError:
            return ''

    def encrypt(self, text, recv_key, template, session_key_type, xpath=''):
        """
        :param text: The text to be compiled
        :param recv_key: Filename of a file where the key resides
        :param template: Filename of a file with the pre-encryption part
        :param session_key_type: Type and size of a new session key
            'des-192' generates a new 192 bits DES key for DES3 encryption
        :param xpath: What should be encrypted
        :return: The encryption output produced by xmlsec1
        :raises EncryptError: if the xmlsec1 invocation fails
        """
        logger.debug('Encryption input len: %d', len(text))
        tmp = make_temp(text, decode=False, delete_tmpfiles=self.delete_tmpfiles)
        com_list = [
            self.xmlsec,
            '--encrypt',
            '--pubkey-cert-pem', recv_key,
            '--session-key', session_key_type,
            '--xml-data', tmp.name,
        ]
        if xpath:
            com_list.extend(['--node-xpath', xpath])

        try:
            (_stdout, _stderr, output) = self._run_xmlsec(com_list, [template])
        except XmlsecError as e:
            six.raise_from(EncryptError(com_list), e)

        return output

    def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None, node_id=None):
        """
        Will encrypt an assertion

        :param statement: A XML document that contains the assertion to encrypt
        :param enc_key: File name of a file containing the encryption key
        :param template: A template for the encryption part to be added.
        :param key_type: The type of session key to use.
        :param node_xpath: XPath of the node to encrypt (ASSERT_XPATH default)
        :param node_id: Optional node identifier passed to xmlsec1
        :return: The encrypted text
        :raises EncryptError: if the xmlsec1 invocation fails
        """
        if isinstance(statement, SamlBase):
            statement = pre_encrypt_assertion(statement)

        tmp = make_temp(str(statement),
                        decode=False,
                        delete_tmpfiles=self.delete_tmpfiles)
        tmp2 = make_temp(str(template),
                         decode=False,
                         delete_tmpfiles=self.delete_tmpfiles)

        if not node_xpath:
            node_xpath = ASSERT_XPATH

        com_list = [
            self.xmlsec,
            '--encrypt',
            '--pubkey-cert-pem', enc_key,
            '--session-key', key_type,
            '--xml-data', tmp.name,
            '--node-xpath', node_xpath,
        ]
        if node_id:
            com_list.extend(['--node-id', node_id])

        try:
            (_stdout, _stderr, output) = self._run_xmlsec(com_list, [tmp2.name])
        except XmlsecError as e:
            six.raise_from(EncryptError(com_list), e)

        return output.decode('utf-8')

    def decrypt(self, enctext, key_file):
        """
        :param enctext: XML document containing an encrypted part
        :param key_file: The key to use for the decryption
        :return: The decrypted document
        :raises DecryptError: if the xmlsec1 invocation fails
        """
        logger.debug('Decrypt input len: %d', len(enctext))
        tmp = make_temp(enctext, decode=False, delete_tmpfiles=self.delete_tmpfiles)

        com_list = [
            self.xmlsec,
            '--decrypt',
            '--privkey-pem', key_file,
            '--id-attr:Id', ENC_KEY_CLASS,
        ]

        try:
            (_stdout, _stderr, output) = self._run_xmlsec(com_list, [tmp.name])
        except XmlsecError as e:
            six.raise_from(DecryptError(com_list), e)

        return output.decode('utf-8')

    def sign_statement(self, statement, node_name, key_file, node_id):
        """
        Sign an XML statement.

        :param statement: The statement to be signed
        :param node_name: string like 'urn:oasis:names:...:Assertion'
        :param key_file: The file where the key can be found
        :param node_id:
        :return: The signed statement
        :raises SignatureError: if signing fails or produces no output
        """
        if isinstance(statement, SamlBase):
            statement = str(statement)

        tmp = make_temp(statement,
                        suffix=".xml",
                        decode=False,
                        delete_tmpfiles=self.delete_tmpfiles)

        com_list = [
            self.xmlsec,
            '--sign',
            '--privkey-pem', key_file,
            '--id-attr:ID', node_name,
        ]
        if node_id:
            com_list.extend(['--node-id', node_id])

        try:
            (stdout, stderr, output) = self._run_xmlsec(com_list, [tmp.name])
        except XmlsecError as e:
            # NOTE(review): unlike encrypt/decrypt above, this does not chain
            # the original exception via six.raise_from.
            raise SignatureError(com_list)

        # this does not work if --store-signatures is used
        if output:
            return output.decode("utf-8")
        if stdout:
            return stdout.decode("utf-8")
        raise SignatureError(stderr)

    def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id):
        """
        Validate signature on XML document.

        :param signedtext: The XML document as a string
        :param cert_file: The public key that was used to sign the document
        :param cert_type: The file type of the certificate
        :param node_name: The name of the class that is signed
        :param node_id: The identifier of the node
        :return: Boolean True if the signature was correct otherwise False.
        :raises SignatureError: if the xmlsec1 invocation fails
        """
        if not isinstance(signedtext, six.binary_type):
            signedtext = signedtext.encode('utf-8')

        tmp = make_temp(signedtext,
                        suffix=".xml",
                        decode=False,
                        delete_tmpfiles=self.delete_tmpfiles)

        com_list = [
            self.xmlsec,
            '--verify',
            # Restrict references and key data to what the SAML signature
            # profile allows.
            '--enabled-reference-uris', 'empty,same-doc',
            '--enabled-key-data', 'raw-x509-cert',
            '--pubkey-cert-{type}'.format(type=cert_type), cert_file,
            '--id-attr:ID', node_name,
        ]
        if node_id:
            com_list.extend(['--node-id', node_id])

        try:
            (_stdout, stderr, _output) = self._run_xmlsec(com_list, [tmp.name])
        except XmlsecError as e:
            six.raise_from(SignatureError(com_list), e)

        # xmlsec1 reports OK/FAIL on stderr.
        return parse_xmlsec_output(stderr)

    def _run_xmlsec(self, com_list, extra_args):
        """
        Common code to invoke xmlsec and parse the output.

        :param com_list: Key-value parameter list for xmlsec
        :param extra_args: Positional parameters to be appended after all
            key-value parameters
        :result: Whatever xmlsec wrote to an --output temporary file
        """
        with NamedTemporaryFile(suffix='.xml') as ntf:
            com_list.extend(['--output', ntf.name])
            com_list += extra_args

            logger.debug('xmlsec command: %s', ' '.join(com_list))

            pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
            p_out, p_err = pof.communicate()
            p_out = p_out.decode()
            p_err = p_err.decode()

            if pof.returncode != 0:
                errmsg = "returncode={code}\nerror={err}\noutput={out}".format(
                    code=pof.returncode, err=p_err, out=p_out
                )
                logger.error(errmsg)
                raise XmlsecError(errmsg)

            # Read back whatever xmlsec1 wrote to the --output file before
            # the context manager deletes it.
            ntf.seek(0)
            return p_out, p_err, ntf.read()
class CryptoBackendXMLSecurity(CryptoBackend):
    """
    CryptoBackend implementation using pyXMLSecurity to sign and verify
    XML documents.

    Encrypt and decrypt is currently unsupported by pyXMLSecurity.

    pyXMLSecurity uses lxml (libxml2) to parse XML data, but otherwise
    try to get by with native Python code. It does native Python RSA
    signatures, or alternatively PyKCS11 to offload cryptographic work
    to an external PKCS#11 module.
    """

    def __init__(self):
        CryptoBackend.__init__(self)

    def version(self):
        # XXX if XMLSecurity.__init__ included a __version__, that would be
        # better than static 0.0 here.
        return 'XMLSecurity 0.0'

    def sign_statement(self, statement, node_name, key_file, node_id):
        """
        Sign an XML statement.

        The parameters actually used in this CryptoBackend
        implementation are :

        :param statement: XML as string
        :param node_name: Name of the node to sign
        :param key_file: xmlsec key_spec string(), filename,
            'pkcs11://' URI or PEM data
        :returns: Signed XML as string
        """
        import lxml.etree
        import xmlsec

        document = xmlsec.parse_xml(statement)
        signed_document = xmlsec.sign(document, key_file)
        serialized = lxml.etree.tostring(
            signed_document, xml_declaration=False, encoding="UTF-8")
        if isinstance(serialized, six.string_types):
            return serialized
        return serialized.decode("utf-8")

    def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id):
        """
        Validate signature on XML document.

        The parameters actually used in this CryptoBackend
        implementation are :

        :param signedtext: The signed XML data as string
        :param cert_file: xmlsec key_spec string(), filename,
            'pkcs11://' URI or PEM data
        :param cert_type: string, must be 'pem' for now
        :returns: True on successful validation, False otherwise
        """
        if cert_type != 'pem':
            raise Unsupported('Only PEM certs supported here')

        import xmlsec

        document = xmlsec.parse_xml(signedtext)
        try:
            return xmlsec.verify(document, cert_file)
        except xmlsec.XMLSigException:
            return False
def security_context(conf):
    """ Creates a security context based on the configuration

    :param conf: The configuration, this is a Config instance
    :return: A SecurityContext instance, or None when no config is given
    :raises SigverError: if the xmlsec binary cannot be found or the
        configured crypto backend is unknown
    """
    if not conf:
        return None

    try:
        metadata = conf.metadata
    except AttributeError:
        metadata = None

    sec_backend = None

    if conf.crypto_backend == 'xmlsec1':
        xmlsec_binary = conf.xmlsec_binary

        if not xmlsec_binary:
            # Locate the binary on the configured search path.
            try:
                _path = conf.xmlsec_path
            except AttributeError:
                _path = []
            xmlsec_binary = get_xmlsec_binary(_path)

        # verify that xmlsec is where it's supposed to be
        if not os.path.exists(xmlsec_binary):
            # if not os.access(, os.F_OK):
            err_msg = 'xmlsec binary not found: {binary}'
            err_msg = err_msg.format(binary=xmlsec_binary)
            raise SigverError(err_msg)

        crypto = _get_xmlsec_cryptobackend(xmlsec_binary,
                                           delete_tmpfiles=conf.delete_tmpfiles)

        # When a private key is configured, also set up an RSA backend for
        # non-XML (redirect binding) signatures.
        _file_name = conf.getattr('key_file', '')
        if _file_name:
            try:
                rsa_key = import_rsa_key_from_file(_file_name)
            except Exception as err:
                logger.error('Cannot import key from {file}: {err_msg}'.format(
                    file=_file_name, err_msg=err))
                raise
            else:
                sec_backend = RSACrypto(rsa_key)
    elif conf.crypto_backend == 'XMLSecurity':
        # new and somewhat untested pyXMLSecurity crypto backend.
        crypto = CryptoBackendXMLSecurity()
    else:
        err_msg = 'Unknown crypto_backend {backend}'
        err_msg = err_msg.format(backend=conf.crypto_backend)
        raise SigverError(err_msg)

    # Collect the private key files configured for decryption.
    enc_key_files = []
    if conf.encryption_keypairs is not None:
        for _encryption_keypair in conf.encryption_keypairs:
            if 'key_file' in _encryption_keypair:
                enc_key_files.append(_encryption_keypair['key_file'])

    return SecurityContext(
        crypto,
        conf.key_file,
        cert_file=conf.cert_file,
        metadata=metadata,
        only_use_keys_in_metadata=conf.only_use_keys_in_metadata,
        cert_handler_extra_class=conf.cert_handler_extra_class,
        generate_cert_info=conf.generate_cert_info,
        tmp_cert_file=conf.tmp_cert_file,
        tmp_key_file=conf.tmp_key_file,
        validate_certificate=conf.validate_certificate,
        enc_key_files=enc_key_files,
        encryption_keypairs=conf.encryption_keypairs,
        sec_backend=sec_backend,
        delete_tmpfiles=conf.delete_tmpfiles)
def encrypt_cert_from_item(item):
    """Extract an encryption certificate from an item's SPCertEnc extension.

    :param item: A parsed SAML element carrying extension elements
    :return: A PEM-framed certificate string, or None if none was found
    """
    _encrypt_cert = None
    try:
        # The SPCertEnc structure may be found either under item.extensions
        # or directly among the item's extension elements.
        try:
            _elem = extension_elements_to_elements(
                item.extensions.extension_elements, [pefim, ds])
        except:
            # NOTE(review): bare except — any failure here (including
            # unrelated bugs) silently falls through to the second location.
            _elem = extension_elements_to_elements(
                item.extension_elements[0].children,
                [pefim, ds])

        # Use the first X509 certificate found inside a SPCertEnc element.
        for _tmp_elem in _elem:
            if isinstance(_tmp_elem, SPCertEnc):
                for _tmp_key_info in _tmp_elem.key_info:
                    if _tmp_key_info.x509_data is not None and len(
                            _tmp_key_info.x509_data) > 0:
                        _encrypt_cert = _tmp_key_info.x509_data[
                            0].x509_certificate.text
                        break
    except Exception as _exception:
        # Best effort: return None when anything above goes wrong.
        pass

    if _encrypt_cert is not None:
        # Make sure the certificate carries PEM BEGIN/END markers.
        if _encrypt_cert.find('-----BEGIN CERTIFICATE-----\n') == -1:
            _encrypt_cert = '-----BEGIN CERTIFICATE-----\n' + _encrypt_cert
        if _encrypt_cert.find('\n-----END CERTIFICATE-----') == -1:
            _encrypt_cert = _encrypt_cert + '\n-----END CERTIFICATE-----'
    return _encrypt_cert
class CertHandlerExtra(object):
    """Plug-in interface for custom certificate generation and validation.

    Subclass and override the methods below; every base implementation
    rejects the call with a plain Exception.
    """

    def __init__(self):
        pass

    def use_generate_cert_func(self):
        raise Exception('use_generate_cert_func function must be implemented')

    def generate_cert(self, generate_cert_info, root_cert_string,
                      root_key_string):
        # Implementations are expected to return (cert_string, key_string).
        raise Exception('generate_cert function must be implemented')

    def use_validate_cert_func(self):
        raise Exception('use_validate_cert_func function must be implemented')

    def validate_cert(self, cert_str, root_cert_string, root_key_string):
        # Implementations are expected to return True/False.
        raise Exception('validate_cert function must be implemented')
class CertHandler(object):
    def __init__(
            self,
            security_context,
            cert_file=None, cert_type='pem',
            key_file=None, key_type='pem',
            generate_cert_info=None,
            cert_handler_extra_class=None,
            tmp_cert_file=None,
            tmp_key_file=None,
            verify_cert=False):
        """
        Initiates the class for handling certificates. Enables the certificates
        to either be a single certificate as base functionality or makes it
        possible to generate a new certificate for each call to the function.

        :param security_context: SecurityContext whose key/cert files this
            handler may rewrite in update_cert()
        :param cert_file:
        :param cert_type:
        :param key_file:
        :param key_type:
        :param generate_cert_info:
        :param cert_handler_extra_class:
        :param tmp_cert_file:
        :param tmp_key_file:
        :param verify_cert:
        """
        self._verify_cert = False
        self._generate_cert = False
        # This cert do not have to be valid, it is just the last cert to be
        # validated.
        self._last_cert_verified = None
        self._last_validated_cert = None
        # Only PEM input enables verification/generation features.
        if cert_type == 'pem' and key_type == 'pem':
            self._verify_cert = verify_cert is True
            self._security_context = security_context
            self._osw = OpenSSLWrapper()
            if key_file and os.path.isfile(key_file):
                self._key_str = self._osw.read_str_from_file(key_file, key_type)
            else:
                self._key_str = ''
            if cert_file and os.path.isfile(cert_file):
                self._cert_str = self._osw.read_str_from_file(cert_file,
                                                              cert_type)
            else:
                self._cert_str = ''

            self._tmp_cert_str = self._cert_str
            self._tmp_key_str = self._key_str
            self._tmp_cert_file = tmp_cert_file
            self._tmp_key_file = tmp_key_file

            self._cert_info = None
            self._generate_cert_func_active = False
            # Per-call certificate generation requires cert/key material and
            # both temporary file locations.
            if generate_cert_info is not None \
                    and len(self._cert_str) > 0 \
                    and len(self._key_str) > 0 \
                    and tmp_key_file is not None \
                    and tmp_cert_file is not None:
                self._generate_cert = True
                self._cert_info = generate_cert_info
                self._cert_handler_extra_class = cert_handler_extra_class

    def verify_cert(self, cert_file):
        """Validate *cert_file* against the root certificate.

        Returns True when verification is disabled or succeeds.
        """
        if self._verify_cert:
            if cert_file and os.path.isfile(cert_file):
                cert_str = self._osw.read_str_from_file(cert_file, 'pem')
            else:
                return False
            self._last_validated_cert = cert_str
            if self._cert_handler_extra_class is not None and \
                    self._cert_handler_extra_class.use_validate_cert_func():
                self._cert_handler_extra_class.validate_cert(
                    cert_str, self._cert_str, self._key_str)
            else:
                valid, mess = self._osw.verify(self._cert_str, cert_str)
                logger.info('CertHandler.verify_cert: %s', mess)
                return valid
        return True

    def generate_cert(self):
        # Whether per-call certificate generation is enabled.
        return self._generate_cert

    def update_cert(self, active=False, client_crt=None):
        """Refresh the temporary cert/key and point the security context at them.

        :param active: Trigger generation when self._generate_cert is set
        :param client_crt: Use this client certificate directly (no signing key)
        """
        if (self._generate_cert and active) or client_crt is not None:
            if client_crt is not None:
                self._tmp_cert_str = client_crt
                # No private key for signing
                self._tmp_key_str = ''
            elif self._cert_handler_extra_class is not None and \
                    self._cert_handler_extra_class.use_generate_cert_func():
                (self._tmp_cert_str, self._tmp_key_str) = \
                    self._cert_handler_extra_class.generate_cert(
                        self._cert_info, self._cert_str, self._key_str)
            else:
                # Create a CSR and sign it with our own root cert/key.
                self._tmp_cert_str, self._tmp_key_str = self._osw \
                    .create_certificate(self._cert_info, request=True)
                self._tmp_cert_str = self._osw.create_cert_signed_certificate(
                    self._cert_str, self._key_str, self._tmp_cert_str)
                valid, mess = self._osw.verify(self._cert_str,
                                               self._tmp_cert_str)
            # Persist the new material and rewire the security context to it.
            self._osw.write_str_to_file(self._tmp_cert_file, self._tmp_cert_str)
            self._osw.write_str_to_file(self._tmp_key_file, self._tmp_key_str)
            self._security_context.key_file = self._tmp_key_file
            self._security_context.cert_file = self._tmp_cert_file
            self._security_context.key_type = 'pem'
            self._security_context.cert_type = 'pem'
            self._security_context.my_cert = read_cert_from_file(
                self._security_context.cert_file,
                self._security_context.cert_type)
class SecurityContext(object):
my_cert = None
    def __init__(
            self,
            crypto,
            key_file='', key_type='pem',
            cert_file='', cert_type='pem',
            metadata=None,
            template='',
            encrypt_key_type='des-192',
            only_use_keys_in_metadata=False,
            cert_handler_extra_class=None,
            generate_cert_info=None,
            tmp_cert_file=None, tmp_key_file=None,
            validate_certificate=None,
            enc_key_files=None, enc_key_type='pem',
            encryption_keypairs=None,
            enc_cert_type='pem',
            sec_backend=None,
            delete_tmpfiles=True):
        """Set up signing, verification and encryption state.

        :param crypto: A CryptoBackend doing the actual XML crypto work
        :param sec_backend: Optional RSACrypto for redirect-binding signatures
        """
        if not isinstance(crypto, CryptoBackend):
            raise ValueError("crypto should be of type CryptoBackend")
        self.crypto = crypto

        if sec_backend and not isinstance(sec_backend, RSACrypto):
            raise ValueError("sec_backend should be of type RSACrypto")
        self.sec_backend = sec_backend

        # Your private key for signing
        self.key_file = key_file
        self.key_type = key_type

        # Your public key for signing
        self.cert_file = cert_file
        self.cert_type = cert_type

        # Your private key for encryption
        self.enc_key_files = enc_key_files
        self.enc_key_type = enc_key_type

        # Your public key for encryption
        self.encryption_keypairs = encryption_keypairs
        self.enc_cert_type = enc_cert_type

        # Base64 payload of our own signing certificate.
        self.my_cert = read_cert_from_file(cert_file, cert_type)

        self.cert_handler = CertHandler(
            self,
            cert_file, cert_type,
            key_file, key_type,
            generate_cert_info,
            cert_handler_extra_class,
            tmp_cert_file,
            tmp_key_file,
            validate_certificate)

        self.cert_handler.update_cert(True)

        self.metadata = metadata
        self.only_use_keys_in_metadata = only_use_keys_in_metadata

        # Default XML encryption template shipped with the package.
        if not template:
            with _resource_path(_data_template, "template_enc.xml") as fp:
                self.template = str(fp)
        else:
            self.template = template

        self.encrypt_key_type = encrypt_key_type
        self.delete_tmpfiles = delete_tmpfiles
def correctly_signed(self, xml, must=False):
logger.debug('verify correct signature')
return self.correctly_signed_response(xml, must)
def encrypt(self, text, recv_key='', template='', key_type=''):
"""
xmlsec encrypt --pubkey-pem pub-userkey.pem
--session-key aes128-cbc --xml-data doc-plain.xml
--output doc-encrypted.xml session-key-template.xml
:param text: Text to encrypt
:param recv_key: A file containing the receivers public key
:param template: A file containing the XMLSEC template
:param key_type: The type of session key to use
:result: An encrypted XML text
"""
if not key_type:
key_type = self.encrypt_key_type
if not template:
template = self.template
return self.crypto.encrypt(text, recv_key, template, key_type)
def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None):
"""
Will encrypt an assertion
:param statement: A XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text
"""
return self.crypto.encrypt_assertion(
statement, enc_key, template, key_type, node_xpath)
def decrypt_keys(self, enctext, keys=None):
""" Decrypting an encrypted text by the use of a private key.
:param enctext: The encrypted text as a string
:param keys: Keys to try to decrypt enctext with
:return: The decrypted text
"""
key_files = []
if not isinstance(keys, list):
keys = [keys]
keys_filtered = (key for key in keys if key)
keys_encoded = (
key.encode("ascii") if not isinstance(key, six.binary_type) else key
for key in keys_filtered
)
key_files = list(
make_temp(key, decode=False, delete_tmpfiles=self.delete_tmpfiles)
for key in keys_encoded
)
key_file_names = list(tmp.name for tmp in key_files)
try:
dectext = self.decrypt(enctext, key_file=key_file_names)
except DecryptError as e:
raise
else:
return dectext
def decrypt(self, enctext, key_file=None):
""" Decrypting an encrypted text by the use of a private key.
:param enctext: The encrypted text as a string
:return: The decrypted text
"""
if not isinstance(key_file, list):
key_file = [key_file]
key_files = [
key for key in itertools.chain(key_file, self.enc_key_files) if key
]
for key_file in key_files:
try:
dectext = self.crypto.decrypt(enctext, key_file)
except XmlsecError as e:
continue
else:
if dectext:
return dectext
errmsg = "No key was able to decrypt the ciphertext. Keys tried: {keys}"
errmsg = errmsg.format(keys=key_files)
raise DecryptError(errmsg)
def verify_signature(self, signedtext, cert_file=None, cert_type='pem', node_name=NODE_NAME, node_id=None):
""" Verifies the signature of a XML document.
:param signedtext: The XML document as a string
:param cert_file: The public key that was used to sign the document
:param cert_type: The file type of the certificate
:param node_name: The name of the class that is signed
:param node_id: The identifier of the node
:return: Boolean True if the signature was correct otherwise False.
"""
# This is only for testing purposes, otherwise when would you receive
# stuff that is signed with your key !?
if not cert_file:
cert_file = self.cert_file
cert_type = self.cert_type
return self.crypto.validate_signature(
signedtext,
cert_file=cert_file,
cert_type=cert_type,
node_name=node_name,
node_id=node_id,
)
    def _check_signature(self, decoded_xml, item, node_name=NODE_NAME, origdoc=None, must=False, only_valid_cert=False, issuer=None):
        """Verify the signature on *item* using known certificates.

        :param decoded_xml: The XML document (string) the signature covers
        :param item: The parsed entity carrying the signature
        :param node_name: Name of the signed node/class
        :param origdoc: The original XML string, if available
        :param must: Unused here; presence of a signature is the caller's job
        :param only_valid_cert: Accept a valid certificate even when the
            signature itself did not verify
        :param issuer: Fallback issuer element when *item* has none
        :return: *item*, on success
        :raises MissingKey: if no certificate is available for the issuer
        :raises SignatureError: on xmldsig constraint violations or when no
            certificate verifies the signature
        :raises CertificateError: if the verifying certificate is invalid
        """
        try:
            _issuer = item.issuer.text.strip()
        except AttributeError:
            _issuer = None

        if _issuer is None:
            try:
                _issuer = issuer.text.strip()
            except AttributeError:
                _issuer = None

        # More trust in certs from metadata then certs in the XML document
        if self.metadata:
            try:
                _certs = self.metadata.certs(_issuer, 'any', 'signing')
            except KeyError:
                _certs = []
            certs = []
            for cert in _certs:
                if isinstance(cert, six.string_types):
                    # PEM-wrap the raw cert and stage it in a temp file for
                    # the xmlsec backend.
                    content = pem_format(cert)
                    tmp = make_temp(content,
                                    suffix=".pem",
                                    decode=False,
                                    delete_tmpfiles=self.delete_tmpfiles)
                    certs.append(tmp)
                else:
                    certs.append(cert)
        else:
            certs = []

        if not certs and not self.only_use_keys_in_metadata:
            logger.debug('==== Certs from instance ====')
            certs = [
                make_temp(content=pem_format(cert),
                          suffix=".pem",
                          decode=False,
                          delete_tmpfiles=self.delete_tmpfiles)
                for cert in cert_from_instance(item)
            ]
        else:
            logger.debug('==== Certs from metadata ==== %s: %s ====', _issuer, certs)

        if not certs:
            raise MissingKey(_issuer)

        # saml-core section "5.4 XML Signature Profile" defines constrains on the
        # xmldsig-core facilities. It explicitly dictates that enveloped signatures
        # are the only signatures allowed. This means that:
        # * Assertion/RequestType/ResponseType elements must have an ID attribute
        # * signatures must have a single Reference element
        # * the Reference element must have a URI attribute
        # * the URI attribute contains an anchor
        # * the anchor points to the enclosing element's ID attribute
        references = item.signature.signed_info.reference
        signatures_must_have_a_single_reference_element = len(references) == 1
        the_Reference_element_must_have_a_URI_attribute = (
            signatures_must_have_a_single_reference_element
            and hasattr(references[0], "uri")
        )
        the_URI_attribute_contains_an_anchor = (
            the_Reference_element_must_have_a_URI_attribute
            and references[0].uri.startswith("#")
            and len(references[0].uri) > 1
        )
        the_anchor_points_to_the_enclosing_element_ID_attribute = (
            the_URI_attribute_contains_an_anchor
            and references[0].uri == "#{id}".format(id=item.id)
        )
        validators = {
            "signatures must have a single reference element": (
                signatures_must_have_a_single_reference_element
            ),
            "the Reference element must have a URI attribute": (
                the_Reference_element_must_have_a_URI_attribute
            ),
            "the URI attribute contains an anchor": (
                the_URI_attribute_contains_an_anchor
            ),
            "the anchor points to the enclosing element ID attribute": (
                the_anchor_points_to_the_enclosing_element_ID_attribute
            ),
        }
        if not all(validators.values()):
            error_context = {
                "message": "Signature failed to meet constraints on xmldsig",
                "validators": validators,
                "item ID": item.id,
                "reference URI": item.signature.signed_info.reference[0].uri,
                "issuer": _issuer,
                "node name": node_name,
                "xml document": decoded_xml,
            }
            raise SignatureError(error_context)

        # Try each candidate certificate in turn until one verifies.
        verified = False
        last_pem_file = None
        for pem_fd in certs:
            try:
                last_pem_file = pem_fd.name
                if self.verify_signature(
                    decoded_xml,
                    pem_fd.name,
                    node_name=node_name,
                    node_id=item.id,
                ):
                    verified = True
                    break
            except XmlsecError as exc:
                # Backend failure for this cert: log and try the next one.
                logger.error('check_sig: %s', exc)
                pass
            except Exception as exc:
                logger.error('check_sig: %s', exc)
                raise

        if verified or only_valid_cert:
            if not self.cert_handler.verify_cert(last_pem_file):
                raise CertificateError('Invalid certificate!')
        else:
            raise SignatureError('Failed to verify signature')

        return item
def check_signature(self, item, node_name=NODE_NAME, origdoc=None, must=False, issuer=None):
"""
:param item: Parsed entity
:param node_name: The name of the node/class/element that is signed
:param origdoc: The original XML string
:param must:
:return:
"""
return self._check_signature(
origdoc,
item,
node_name,
origdoc,
must=must,
issuer=issuer,
)
    def correctly_signed_message(self, decoded_xml, msgtype, must=False, origdoc=None, only_valid_cert=False):
        """Check if a request is correctly signed, if we have metadata for
        the entity that sent the info use that, if not use the key that are in
        the message if any.

        :param decoded_xml: The SAML message as an XML infoset (a string)
        :param msgtype: SAML protocol message type
        :param must: Whether there must be a signature
        :param origdoc:
        :return: The parsed message, when the signature checks out
        :raises TypeError: if decoded_xml does not parse as *msgtype*
        :raises SignatureError: if a required signature is missing
        """
        # Find the '<msgtype>_from_string' parser in either saml or samlp.
        attr = '{type}_from_string'.format(type=msgtype)
        _func = getattr(saml, attr, None)
        _func = getattr(samlp, attr, _func)

        msg = _func(decoded_xml)
        if not msg:
            raise TypeError('Not a {type}'.format(type=msgtype))

        if not msg.signature:
            if must:
                err_msg = 'Required signature missing on {type}'
                err_msg = err_msg.format(type=msgtype)
                raise SignatureError(err_msg)
            else:
                # Unsigned and not required to be: accept as-is.
                return msg

        return self._check_signature(
            decoded_xml,
            msg,
            class_name(msg),
            origdoc,
            must=must,
            only_valid_cert=only_valid_cert)
    # Convenience wrappers around correctly_signed_message(), one per SAML
    # protocol message type. All accept the same arguments; see
    # correctly_signed_message() for their meaning.
    def correctly_signed_authn_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authn_request', must, origdoc, only_valid_cert=only_valid_cert)

    def correctly_signed_authn_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authn_query', must, origdoc, only_valid_cert)

    def correctly_signed_logout_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'logout_request', must, origdoc, only_valid_cert)

    def correctly_signed_logout_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'logout_response', must, origdoc, only_valid_cert)

    def correctly_signed_attribute_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'attribute_query', must, origdoc, only_valid_cert)

    def correctly_signed_authz_decision_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authz_decision_query', must, origdoc, only_valid_cert)

    def correctly_signed_authz_decision_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authz_decision_response', must, origdoc, only_valid_cert)

    def correctly_signed_name_id_mapping_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'name_id_mapping_request', must, origdoc, only_valid_cert)

    def correctly_signed_name_id_mapping_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'name_id_mapping_response', must, origdoc, only_valid_cert)

    def correctly_signed_artifact_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'artifact_request', must, origdoc, only_valid_cert)

    def correctly_signed_artifact_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'artifact_response', must, origdoc, only_valid_cert)

    def correctly_signed_manage_name_id_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'manage_name_id_request', must, origdoc, only_valid_cert)

    def correctly_signed_manage_name_id_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'manage_name_id_response', must, origdoc, only_valid_cert)

    def correctly_signed_assertion_id_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'assertion_id_request', must, origdoc, only_valid_cert)

    # Note: the response to an AssertionIDRequest is parsed as a plain
    # 'assertion', hence the msgtype below.
    def correctly_signed_assertion_id_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'assertion', must, origdoc, only_valid_cert)
    def correctly_signed_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, require_response_signature=False, **kwargs):
        """ Check if a instance is correctly signed, if we have metadata for
        the IdP that sent the info use that, if not use the key that are in
        the message if any.

        :param decoded_xml: The SAML message as a XML string
        :param must: Whether there must be a signature
        :param origdoc:
        :param only_valid_cert:
        :param require_response_signature:
        :return: None if the signature can not be verified otherwise an instance
        :raises TypeError: if decoded_xml is not a Response
        :raises SignatureError: if a required response signature is missing
        """
        response = samlp.any_response_from_string(decoded_xml)
        if not response:
            raise TypeError('Not a Response')

        if response.signature:
            # Passing 'do_not_verify' in kwargs skips verification entirely.
            if 'do_not_verify' in kwargs:
                pass
            else:
                # NOTE(review): must/only_valid_cert are not forwarded to
                # _check_signature here — confirm whether that is intended.
                self._check_signature(decoded_xml, response,
                                      class_name(response), origdoc)
        elif require_response_signature:
            raise SignatureError('Signature missing for response')

        return response
def sign_statement_using_xmlsec(self, statement, **kwargs):
    """Deprecated alias kept for backwards compatibility.

    Use sign_statement() instead; all arguments are forwarded unchanged.
    """
    return self.sign_statement(statement, **kwargs)
def sign_statement(self, statement, node_name, key=None, key_file=None, node_id=None):
    """Sign a SAML statement.

    :param statement: The statement to be signed
    :param node_name: string like 'urn:oasis:names:...:Assertion'
    :param key: In-memory key to sign with; used when no key_file is given
    :param key_file: The file where the key can be found; falls back to
        self.key_file when neither key nor key_file is supplied
    :param node_id: Id of the node within the statement to sign
    :return: The signed statement
    """
    if key and not key_file:
        # The crypto backend works with files, so materialise the
        # in-memory key as a temporary PEM file. Keep ``tmp`` referenced
        # until the backend call below so the file is not removed early.
        tmp = make_temp(str(key).encode(), suffix=".pem", delete_tmpfiles=self.delete_tmpfiles)
        key_file = tmp.name
    elif not key and not key_file:
        key_file = self.key_file

    return self.crypto.sign_statement(
        statement,
        node_name,
        key_file,
        node_id,
    )
def sign_assertion(self, statement, **kwargs):
    """Sign a SAML assertion.

    See sign_statement() for the kwargs.

    :param statement: The statement to be signed
    :return: The signed statement
    """
    node_name = class_name(saml.Assertion())
    return self.sign_statement(statement, node_name, **kwargs)
def sign_attribute_query_using_xmlsec(self, statement, **kwargs):
    """Deprecated alias kept for backwards compatibility.

    Use sign_attribute_query() instead; arguments are forwarded unchanged.
    """
    return self.sign_attribute_query(statement, **kwargs)
def sign_attribute_query(self, statement, **kwargs):
    """Sign a SAML attribute query.

    See sign_statement() for the kwargs.

    :param statement: The statement to be signed
    :return: The signed statement
    """
    node_name = class_name(samlp.AttributeQuery())
    return self.sign_statement(statement, node_name, **kwargs)
def multiple_signatures(self, statement, to_sign, key=None, key_file=None, sign_alg=None, digest_alg=None):
    """
    Sign multiple parts of a statement.

    :param statement: The statement that should be signed, as XML text
    :param to_sign: A list of (item, id) tuples that specifies what to sign
    :param key: A key that should be used for doing the signing
    :param key_file: A file that contains the key to be used
    :param sign_alg: Signature algorithm URI preset on newly added signatures
    :param digest_alg: Digest algorithm URI preset on newly added signatures
    :return: A possibly multiple signed statement
    """
    for (item, item_id) in to_sign:
        if not item_id:
            if not item.id:
                # BUGFIX: the loop variable used to be named ``sid`` and
                # shadowed the module-level sid() id generator, so this
                # call raised TypeError ("object is not callable") instead
                # of producing a fresh id. Use the helper via its real name.
                item_id = item.id = sid()
            else:
                item_id = item.id

        if not item.signature:
            # Preset the <ds:Signature> skeleton so xmlsec can fill it in.
            item.signature = pre_signature_part(
                ident=item_id,
                public_key=self.cert_file,
                sign_alg=sign_alg,
                digest_alg=digest_alg,
            )

        statement = self.sign_statement(
            statement,
            class_name(item),
            key=key,
            key_file=key_file,
            node_id=item_id,
        )
    return statement
# XXX FIXME calls DefaultSignature - remove to unveil chain of calls without proper args
def pre_signature_part(
    ident,
    public_key=None,
    identifier=None,
    digest_alg=None,
    sign_alg=None,
):
    """
    If an assertion is to be signed the signature part has to be preset
    with which algorithms to be used, this function returns such a
    preset part.

    :param ident: The identifier of the assertion, so you know which assertion
        was signed
    :param public_key: The base64 part of a PEM file
    :param identifier: Optional suffix for the Signature element's own id
    :param digest_alg: Digest algorithm URI; module default when not given
    :param sign_alg: Signature algorithm URI; module default when not given
    :return: A preset signature part
    """
    # XXX
    if not digest_alg:
        digest_alg = ds.DefaultSignature().get_digest_alg()
    if not sign_alg:
        sign_alg = ds.DefaultSignature().get_sign_alg()

    signature_method = ds.SignatureMethod(algorithm=sign_alg)
    canonicalization_method = ds.CanonicalizationMethod(
        algorithm=ds.ALG_EXC_C14N)
    # Enveloped transform + exclusive c14n: the standard combination for
    # a signature embedded inside the element it signs.
    trans0 = ds.Transform(algorithm=ds.TRANSFORM_ENVELOPED)
    trans1 = ds.Transform(algorithm=ds.ALG_EXC_C14N)
    transforms = ds.Transforms(transform=[trans0, trans1])
    digest_method = ds.DigestMethod(algorithm=digest_alg)

    reference = ds.Reference(
        uri='#{id}'.format(id=ident),
        digest_value=ds.DigestValue(),
        transforms=transforms,
        digest_method=digest_method)

    signed_info = ds.SignedInfo(
        signature_method=signature_method,
        canonicalization_method=canonicalization_method,
        reference=reference)

    signature = ds.Signature(
        signed_info=signed_info,
        signature_value=ds.SignatureValue())

    if identifier:
        signature.id = 'Signature{n}'.format(n=identifier)

    # Embed the certificate so a receiver can locate the verification key.
    if public_key:
        x509_data = ds.X509Data(
            x509_certificate=[ds.X509Certificate(text=public_key)])
        key_info = ds.KeyInfo(x509_data=x509_data)
        signature.key_info = key_info

    return signature
# <?xml version="1.0" encoding="UTF-8"?>
# <EncryptedData Id="ED" Type="http://www.w3.org/2001/04/xmlenc#Element"
# xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#tripledes-cbc"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <EncryptedKey Id="EK" xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#rsa-1_5"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <ds:KeyName>my-rsa-key</ds:KeyName>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# <ReferenceList>
# <DataReference URI="#ED"/>
# </ReferenceList>
# </EncryptedKey>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# </EncryptedData>
def pre_encryption_part(msg_enc=TRIPLE_DES_CBC, key_enc=RSA_1_5, key_name='my-rsa-key',
                        encrypted_key_id=None, encrypted_data_id=None):
    """
    Build the EncryptedData/EncryptedKey skeleton xmlsec needs for encryption
    (see the XML sample in the comment above).

    :param msg_enc: Algorithm URI used for encrypting the message itself
    :param key_enc: Algorithm URI used for encrypting the session key
    :param key_name: Name of the key the session key is encrypted with
    :param encrypted_key_id: Optional explicit Id for the EncryptedKey element
    :param encrypted_data_id: Optional explicit Id for the EncryptedData element
    :return: An EncryptedData instance with empty CipherValue placeholders
    """
    # Random ids keep multiple encrypted parts within one document distinct.
    ek_id = encrypted_key_id or "EK_{id}".format(id=gen_random_key())
    ed_id = encrypted_data_id or "ED_{id}".format(id=gen_random_key())
    msg_encryption_method = EncryptionMethod(algorithm=msg_enc)
    key_encryption_method = EncryptionMethod(algorithm=key_enc)

    encrypted_key = EncryptedKey(
        id=ek_id,
        encryption_method=key_encryption_method,
        key_info=ds.KeyInfo(key_name=ds.KeyName(text=key_name)),
        # Empty CipherValue: xmlsec fills in the actual ciphertext later.
        cipher_data=CipherData(cipher_value=CipherValue(text='')),
    )
    key_info = ds.KeyInfo(encrypted_key=encrypted_key)

    encrypted_data = EncryptedData(
        id=ed_id,
        type='http://www.w3.org/2001/04/xmlenc#Element',
        encryption_method=msg_encryption_method,
        key_info=key_info,
        cipher_data=CipherData(cipher_value=CipherValue(text='')))
    return encrypted_data
def pre_encrypt_assertion(response):
    """
    Move the assertion to within a encrypted_assertion.

    :param response: The response with one assertion
    :return: The response but now with the assertion within an
        encrypted_assertion.
    """
    assertion = response.assertion
    response.assertion = None
    response.encrypted_assertion = EncryptedAssertion()
    if assertion is not None:
        # A list of assertions and a single assertion use different
        # extension-element APIs on the EncryptedAssertion container.
        if isinstance(assertion, list):
            response.encrypted_assertion.add_extension_elements(assertion)
        else:
            response.encrypted_assertion.add_extension_element(assertion)
    return response
if __name__ == '__main__':
    # Tiny CLI helper for introspecting this module.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--list-sigalgs', dest='listsigalgs',
                        action='store_true',
                        help='List implemented signature algorithms')
    args = parser.parse_args()

    if args.listsigalgs:
        # Iterating a dict yields its keys directly; the previous
        # ``[key for key, value in SIGNER_ALGS.items()]`` unpacked and
        # discarded every value (flagged by Perflint PERF102).
        print('\n'.join(SIGNER_ALGS))
| ./CrossVul/dataset_final_sorted/CWE-347/py/good_1889_0 |
crossvul-python_data_bad_4360_1 | import logging
import os.path
import warnings
from typing import Dict
from typing import Optional
from oic import rndstr
from oic.exception import AuthzError
from oic.exception import MessageException
from oic.exception import NotForMe
from oic.exception import PyoidcError
from oic.oauth2 import Grant
from oic.oauth2.consumer import TokenError
from oic.oauth2.consumer import UnknownState
from oic.oauth2.consumer import stateID
from oic.oauth2.message import ErrorResponse
from oic.oic import ENDPOINTS
from oic.oic import Client
from oic.oic.message import AccessTokenResponse
from oic.oic.message import AuthorizationRequest
from oic.oic.message import AuthorizationResponse
from oic.oic.message import BackChannelLogoutRequest
from oic.oic.message import Claims
from oic.oic.message import ClaimsRequest
from oic.utils import http_util
from oic.utils.sanitize import sanitize
from oic.utils.sdb import DictSessionBackend
from oic.utils.sdb import SessionBackend
from oic.utils.sdb import session_extended_get
from oic.utils.sdb import session_get
from oic.utils.sdb import session_update
__author__ = "rohe0002"
logger = logging.getLogger(__name__)
def factory(kaka, sdb, config):
    """
    Return the right Consumer instance dependent on what's in the cookie.

    :param kaka: The cookie
    :param sdb: The session database
    :param config: The common Consumer configuration
    :return: Consumer instance or None
    """
    cookie_name = config["name"]
    parts = http_util.cookie_parts(cookie_name, kaka)
    if parts is None:
        # None of our cookies present -- nothing to restore.
        return None

    consumer = Consumer(sdb, config)
    consumer.restore(parts[0])
    http_util.parse_cookie(cookie_name, consumer.seed, kaka)
    return consumer
def build_userinfo_claims(claims, sformat="signed", locale="us-en"):
    """
    Create userinfo request based on claims.

    config example::

        "userinfo":{
            "name": {"essential": true},
            "nickname": null,
            "email": {"essential": true},
            "email_verified": {"essential": true},
            "picture": null
        }

    :param claims: Mapping of claim name to its requirement specification
    :param sformat: Stored on the Claims object as ``format``
    :param locale: Currently unused; kept for backward compatibility
    :return: A Claims instance
    """
    return Claims(format=sformat, **claims)
def clean_response(aresp):
    """
    Create a new instance with only the standard attributes.

    :param aresp: The original AccessTokenResponse
    :return: An AccessTokenResponse instance
    """
    atr = AccessTokenResponse()
    # Copy over only the parameters the AccessTokenResponse schema knows.
    for prop in atr.parameters():
        try:
            atr[prop] = aresp[prop]
        except KeyError:
            # Optional parameter not present in the original response.
            pass
    return atr
# Instance attributes excluded from Consumer.dictionary() when the state is
# serialized to the session store (they hold live objects, not plain state).
IGNORE = [
    "request2endpoint",
    "response2error",
    "grant_class",
    "token_class",
    "sdb",
    "wf",
    "events",
    "message_factory",
]

# Client preference attributes that may be announced during dynamic client
# registration (names follow the OIDC client metadata parameters).
CONSUMER_PREF_ARGS = [
    "token_endpoint_auth_method",
    "subject_type",
    "require_signed_request_object",
    "userinfo_signed_response_algs",
    "userinfo_encrypted_response_alg",
    "userinfo_encrypted_response_enc",
    "userinfo_encrypted_response_int",
    "id_token_signed_response_algs",
    "id_token_encrypted_response_alg",
    "id_token_encrypted_response_enc",
    "id_token_encrypted_response_int",
    "request_object_signing_alg",
    "request_object_encryption_alg",
    "request_object_encryption_enc",
    "default_max_age",
    "require_auth_time",
    "default_acr_values",
]
class Consumer(Client):
    """An OpenID Connect consumer implementation."""

    def __init__(
        self,
        session_db,
        consumer_config,
        client_config=None,
        server_info=None,
        debug=False,
        client_prefs=None,
        sso_db=None,
    ):
        """
        Initialize a Consumer instance.

        :param session_db: Where info are kept about sessions
        :param consumer_config: Configuration of the consumer
        :param client_config: Client configuration
        :param server_info: Information about the server
        :param debug: Debug flag (overrides any debug value in consumer_config)
        :param client_prefs: Accepted but not used by this constructor.
        :param sso_db: Single sign-on session database; a DictSessionBackend
            is created when omitted
        """
        if client_config is None:
            client_config = {}
        Client.__init__(self, **client_config)
        self.consumer_config = consumer_config
        if consumer_config:
            try:
                self.debug = consumer_config["debug"]
            except KeyError:
                self.debug = 0
        if server_info:
            for endpoint in ENDPOINTS:
                try:
                    setattr(self, endpoint, server_info[endpoint])
                except KeyError:
                    setattr(self, endpoint, "")
        if not isinstance(session_db, SessionBackend):
            warnings.warn(
                "Please use `SessionBackend` to ensure proper API for the database.",
                DeprecationWarning,
            )
        self.sdb = session_db
        if sso_db is not None:
            if not isinstance(sso_db, SessionBackend):
                warnings.warn(
                    "Please use `SessionBackend` to ensure proper API for the database.",
                    DeprecationWarning,
                )
            self.sso_db: SessionBackend = sso_db
        else:
            self.sso_db = DictSessionBackend()
        # NOTE(review): this unconditionally overwrites any debug value read
        # from consumer_config above -- confirm that is intended.
        self.debug = debug
        self.seed = ""
        self.nonce = ""
        self.request_filename = ""
        self.request_uri = ""
        self.user_info = None
        self.registration_expires_at = 0
        self.secret_type = "Bearer"

    def update(self, sid):
        """
        Update the instance variables from something stored in the session database.

        Will not overwrite something that's already there.
        Except for the grant dictionary !!

        :param sid: Session identifier
        """
        for key, val in self.sdb[sid].items():
            try:
                _val = getattr(self, key)
            except AttributeError:
                continue
            if not _val and val:
                setattr(self, key, val)
            elif key == "grant" and val:
                # val is a Grant instance
                val.update(_val)
                setattr(self, key, val)

    def restore(self, sid):
        """
        Restore the instance variables from something stored in the session database.

        :param sid: Session identifier
        """
        for key, val in self.sdb[sid].items():
            setattr(self, key, val)

    def dictionary(self):
        # Serializable snapshot of the consumer state, minus live objects
        # listed in IGNORE.
        return dict([(k, v) for k, v in self.__dict__.items() if k not in IGNORE])

    def _backup(self, sid):
        """
        Store instance variable values in the session store under a session identifier.

        :param sid: Session identifier
        """
        self.sdb[sid] = self.dictionary()

    def begin(self, scope="", response_type="", use_nonce=False, path="", **kwargs):
        """
        Begin the OIDC flow.

        :param scope: Defines which user info claims is wanted
        :param response_type: Controls the parameters returned in the response from the Authorization Endpoint
        :param use_nonce: If not implicit flow nonce is optional. This defines if it should be used anyway.
        :param path: The path part of the redirect URL
        :return: A 2-tuple, session identifier and URL to which the user should be redirected
        """
        _log_info = logger.info
        if self.debug:
            _log_info("- begin -")

        # Build redirect_uris from the service path and the configured authz
        # page, normalising the slash between the two parts.
        _page = self.consumer_config["authz_page"]
        if not path.endswith("/"):
            if _page.startswith("/"):
                self.redirect_uris = [path + _page]
            else:
                self.redirect_uris = ["%s/%s" % (path, _page)]
        else:
            if _page.startswith("/"):
                self.redirect_uris = [path + _page[1:]]
            else:
                self.redirect_uris = ["%s/%s" % (path, _page)]

        # Put myself in the dictionary of sessions, keyed on session-id
        if not self.seed:
            self.seed = rndstr()

        if not scope:
            scope = self.consumer_config["scope"]
        if not response_type:
            response_type = self.consumer_config["response_type"]

        sid = stateID(path, self.seed)
        self.grant[sid] = Grant(seed=self.seed)
        self._backup(sid)
        self.sdb["seed:%s" % self.seed] = sid
        self.sso_db[sid] = {}

        args = {
            "client_id": self.client_id,
            "state": sid,
            "response_type": response_type,
            "scope": scope,
        }

        # nonce is REQUIRED in implicit flow,
        # OPTIONAL on code flow.
        if "token" in response_type or use_nonce:
            args["nonce"] = rndstr(12)
            self.state2nonce[sid] = args["nonce"]

        if "max_age" in self.consumer_config:
            args["max_age"] = self.consumer_config["max_age"]

        _claims = None
        if "user_info" in self.consumer_config:
            _claims = ClaimsRequest(
                userinfo=Claims(**self.consumer_config["user_info"])
            )
        if "id_token" in self.consumer_config:
            if _claims:
                _claims["id_token"] = Claims(**self.consumer_config["id_token"])
            else:
                _claims = ClaimsRequest(
                    id_token=Claims(**self.consumer_config["id_token"])
                )

        if _claims:
            args["claims"] = _claims

        if "request_method" in self.consumer_config:
            areq = self.construct_AuthorizationRequest(
                request_args=args, extra_args=None, request_param="request"
            )

            if self.consumer_config["request_method"] == "file":
                # Serve the request object from a file referenced by
                # request_uri instead of inlining it in the URL.
                id_request = areq["request"]
                del areq["request"]
                _filedir = self.consumer_config["temp_dir"]
                _webpath = self.consumer_config["temp_path"]
                _name = rndstr(10)
                filename = os.path.join(_filedir, _name)
                # Avoid clobbering an existing request file.
                while os.path.exists(filename):
                    _name = rndstr(10)
                    filename = os.path.join(_filedir, _name)
                fid = open(filename, mode="w")
                fid.write(id_request)
                fid.close()
                _webname = "%s%s/%s" % (path, _webpath, _name)
                areq["request_uri"] = _webname
                self.request_uri = _webname
                self._backup(sid)
        else:
            if "userinfo_claims" in args:  # can only be carried in an IDRequest
                raise PyoidcError("Need a request method")

            areq = self.construct_AuthorizationRequest(
                AuthorizationRequest, request_args=args
            )

        location = areq.request(self.authorization_endpoint)

        if self.debug:
            _log_info("Redirecting to: %s" % location)

        return sid, location

    def _parse_authz(self, query="", **kwargs):
        """Parse an authorization response and restore the matching session.

        :param query: The urlencoded response
        :return: 2-tuple of (AuthorizationResponse, state)
        :raises AuthzError: when the OP returned an error response
        :raises UnknownState: when no session matches the returned state
        """
        _log_info = logger.info
        # Might be an error response
        _log_info("Expect Authorization Response")
        # NOTE(review): **kwargs is accepted here but not forwarded to
        # parse_response -- confirm whether verification-related kwargs
        # should be passed through.
        aresp = self.parse_response(
            AuthorizationResponse, info=query, sformat="urlencoded", keyjar=self.keyjar
        )
        if isinstance(aresp, ErrorResponse):
            _log_info("ErrorResponse: %s" % sanitize(aresp))
            raise AuthzError(aresp.get("error"), aresp)

        _log_info("Aresp: %s" % sanitize(aresp))

        _state = aresp["state"]
        try:
            self.update(_state)
        except KeyError:
            raise UnknownState(_state, aresp)

        self.redirect_uris = [self.sdb[_state]["redirect_uris"]]
        return aresp, _state

    def parse_authz(self, query="", **kwargs):
        """
        Parse authorization response from server.

        Couple of cases
        ["code"]
        ["code", "token"]
        ["code", "id_token", "token"]
        ["id_token"]
        ["id_token", "token"]
        ["token"]

        :return: A 3-tuple (authorization response, access token response,
            id_token); members are None when not applicable to the flow
        """
        _log_info = logger.info
        logger.debug("- authorization -")

        if not query:
            return http_util.BadRequest("Missing query")

        _log_info("response: %s" % sanitize(query))

        if "code" in self.consumer_config["response_type"]:
            aresp, _state = self._parse_authz(query, **kwargs)

            # May have token and id_token information too
            if "access_token" in aresp:
                atr = clean_response(aresp)
                self.access_token = atr
                # update the grant object
                self.get_grant(state=_state).add_token(atr)
            else:
                atr = None

            self._backup(_state)

            try:
                idt = aresp["id_token"]
            except KeyError:
                idt = None
            else:
                try:
                    # NOTE(review): this branch updates self.sdb keyed on the
                    # id token 'sid', while the id_token-only branch below
                    # updates self.sso_db keyed on state -- confirm the
                    # asymmetry is intentional.
                    session_update(self.sdb, idt["sid"], "smid", _state)
                except KeyError:
                    pass

            return aresp, atr, idt
        elif "token" in self.consumer_config["response_type"]:  # implicit flow
            _log_info("Expect Access Token Response")
            atr = self.parse_response(
                AccessTokenResponse,
                info=query,
                sformat="urlencoded",
                keyjar=self.keyjar,
                **kwargs,
            )
            if isinstance(atr, ErrorResponse):
                raise TokenError(atr.get("error"), atr)

            idt = None
            return None, atr, idt
        else:  # only id_token
            aresp, _state = self._parse_authz(query, **kwargs)

            try:
                idt = aresp["id_token"]
            except KeyError:
                idt = None
            else:
                try:
                    session_update(self.sso_db, _state, "smid", idt["sid"])
                except KeyError:
                    pass

            return None, None, idt

    def complete(self, state):
        """
        Do the access token request, the last step in a code flow.

        If Implicit flow was used then this method is never used.

        :param state: The state value of the session
        :return: An AccessTokenResponse instance
        :raises PyoidcError: when no client credentials are available
        :raises TokenError: when the OP returns an error response
        """
        args = {"redirect_uri": self.redirect_uris[0]}
        if "password" in self.consumer_config and self.consumer_config["password"]:
            logger.info("basic auth")
            http_args = {"password": self.consumer_config["password"]}
        elif self.client_secret:
            logger.info("request_body auth")
            http_args = {}
            args.update(
                {
                    "client_secret": self.client_secret,
                    "client_id": self.client_id,
                    "secret_type": self.secret_type,
                }
            )
        else:
            raise PyoidcError("Nothing to authenticate with")

        resp = self.do_access_token_request(
            state=state, request_args=args, http_args=http_args
        )

        logger.info("Access Token Response: %s" % sanitize(resp))

        if resp.type() == "ErrorResponse":
            raise TokenError(resp.error, resp)

        self._backup(state)

        return resp

    def refresh_token(self):
        # Not implemented.
        pass

    def get_user_info(self, state):
        """Fetch userinfo for the session and cache it on the instance.

        :param state: The state value of the session
        :return: The userinfo response
        :raises TokenError: when the OP returns an error response
        """
        # 'openid' schema asks for the standard OIDC userinfo claims.
        uinfo = self.do_user_info_request(state=state, schema="openid")

        if uinfo.type() == "ErrorResponse":
            raise TokenError(uinfo.error, uinfo)

        self.user_info = uinfo
        self._backup(state)

        return uinfo

    def refresh_session(self):
        # Not implemented.
        pass

    def check_session(self):
        """
        Check session endpoint.

        With python you could use PyQuery to get the onclick attribute of each
        anchor tag, parse that with a regular expression to get the placeId,
        build the /places/duplicates.jsp?inPID= URL yourself, use requests to
        load the content at that URL, then PyQuery again on the content to get
        the data you need.

        for iframe in mosoup("iframe"):
            mosoup.iframe.extract()

        It accepts postMessage requests from the relevant RP iframe and uses
        postMessage to post back the login status of the End-User at the OP.

        :return:
        """
        pass

    def end_session(self):
        # Not implemented.
        pass

    # LOGOUT related
    def backchannel_logout(
        self, request: Optional[str] = None, request_args: Optional[Dict] = None
    ) -> str:
        """
        Receives a back channel logout request.

        :param request: A urlencoded request
        :param request_args: The request as a dictionary
        :return: A Session Identifier
        :raises ValueError: when neither request nor request_args is given
        :raises MessageException: when the logout token fails verification
        """
        if request:
            req = BackChannelLogoutRequest().from_urlencoded(request)
        elif request_args is not None:
            req = BackChannelLogoutRequest(**request_args)
        else:
            raise ValueError("Missing request specification")

        # Verify audience, issuer and the logout token signature.
        kwargs = {"aud": self.client_id, "iss": self.issuer, "keyjar": self.keyjar}
        try:
            req.verify(**kwargs)
        except (MessageException, ValueError, NotForMe) as err:
            raise MessageException("Bogus logout request: {}".format(err))

        # Find the subject through 'sid' or 'sub'
        try:
            sub = req["logout_token"]["sub"]
        except KeyError:
            # verify has guaranteed that there will be a sid if sub is missing
            sm_id = req["logout_token"]["sid"]
            _sid = session_get(self.sso_db, "smid", sm_id)
        else:
            _sid = session_extended_get(
                self.sso_db, sub, "issuer", req["logout_token"]["iss"]
            )
        return _sid
| ./CrossVul/dataset_final_sorted/CWE-347/py/bad_4360_1 |
crossvul-python_data_bad_4590_0 | """ Functions connected to signing and verifying.
Based on the use of xmlsec1 binaries and not the python xmlsec module.
"""
from OpenSSL import crypto
import base64
import hashlib
import itertools
import logging
import os
import six
from time import mktime
from six.moves.urllib import parse
import saml2.cryptography.asymmetric
import saml2.cryptography.pki
from tempfile import NamedTemporaryFile
from subprocess import Popen
from subprocess import PIPE
from saml2 import samlp
from saml2 import SamlBase
from saml2 import SAMLError
from saml2 import extension_elements_to_elements
from saml2 import class_name
from saml2 import saml
from saml2 import ExtensionElement
from saml2 import VERSION
from saml2.cert import OpenSSLWrapper
from saml2.extension import pefim
from saml2.extension.pefim import SPCertEnc
from saml2.saml import EncryptedAssertion
import saml2.xmldsig as ds
from saml2.s_utils import sid
from saml2.s_utils import Unsupported
from saml2.time_util import instant
from saml2.time_util import str_to_time
from saml2.xmldsig import SIG_RSA_SHA1
from saml2.xmldsig import SIG_RSA_SHA224
from saml2.xmldsig import SIG_RSA_SHA256
from saml2.xmldsig import SIG_RSA_SHA384
from saml2.xmldsig import SIG_RSA_SHA512
from saml2.xmlenc import EncryptionMethod
from saml2.xmlenc import EncryptedKey
from saml2.xmlenc import CipherData
from saml2.xmlenc import CipherValue
from saml2.xmlenc import EncryptedData
logger = logging.getLogger(__name__)
# Fully qualified tag name of a <ds:Signature> element.
SIG = '{{{ns}#}}{attribute}'.format(ns=ds.NAMESPACE, attribute='Signature')

# XML-enc algorithm URIs used as encryption defaults in this module.
RSA_1_5 = 'http://www.w3.org/2001/04/xmlenc#rsa-1_5'
TRIPLE_DES_CBC = 'http://www.w3.org/2001/04/xmlenc#tripledes-cbc'
class SigverError(SAMLError):
    """Base class for all signing/verification errors in this module."""
    pass
class CertificateTooOld(SigverError):
    """A certificate is outside its validity period."""
    pass
class XmlsecError(SigverError):
    """The xmlsec1 binary reported a failure."""
    pass
class MissingKey(SigverError):
    """No usable key was found for the requested operation."""
    pass
class DecryptError(XmlsecError):
    """Decryption via xmlsec failed."""
    pass
class EncryptError(XmlsecError):
    """Encryption via xmlsec failed."""
    pass
class SignatureError(XmlsecError):
    """Signing failed or a required signature is missing."""
    pass
class BadSignature(SigverError):
    """The signature is invalid."""
    pass
class CertificateError(SigverError):
    """A certificate could not be used or did not match expectations."""
    pass
def read_file(*args, **kwargs):
    """Open a file and return its full contents.

    All arguments are forwarded to open(); the handle is closed on return.
    """
    with open(*args, **kwargs) as fp:
        content = fp.read()
    return content
def rm_xmltag(statement):
    """Strip a leading XML declaration (and one following newline) from a statement.

    Bytes input is decoded to text first; only the three declaration
    variants below are recognised.
    """
    XMLTAG = "<?xml version='1.0'?>"
    PREFIX1 = "<?xml version='1.0' encoding='UTF-8'?>"
    PREFIX2 = '<?xml version="1.0" encoding="UTF-8"?>'

    if not isinstance(statement, str):
        statement = statement.decode()

    for declaration in (XMLTAG, PREFIX1, PREFIX2):
        if statement.startswith(declaration):
            statement = statement[len(declaration):]
            if statement[0] == '\n':
                statement = statement[1:]
            break
    return statement
def signed(item):
    """
    Is any part of the document signed ?

    :param item: A Samlbase instance
    :return: True if some part of it is signed
    """
    # The element itself carries a signature.
    if SIG in item.c_children.keys() and item.signature:
        return True

    # Otherwise recurse through all declared children.
    for prop in item.c_child_order:
        child = getattr(item, prop, None)
        if isinstance(child, list):
            if any(signed(chi) for chi in child):
                return True
        elif child and signed(child):
            return True
    return False
def get_xmlsec_binary(paths=None):
    """
    Tries to find the xmlsec1 binary.

    :param paths: Non-system path paths which should be searched when
        looking for xmlsec1
    :return: full name of the xmlsec1 binary found. If no binaries are
        found then an exception is raised.
    """
    if os.name == 'posix':
        bin_name = ['xmlsec1']
    elif os.name == 'nt':
        bin_name = ['xmlsec.exe', 'xmlsec1.exe']
    else:  # Default !?
        bin_name = ['xmlsec1']

    # Caller-supplied search paths take precedence over the system PATH.
    if paths:
        for bname in bin_name:
            for path in paths:
                fil = os.path.join(path, bname)
                try:
                    # lstat raises OSError when the file does not exist.
                    if os.lstat(fil):
                        return fil
                except OSError:
                    pass

    for path in os.environ['PATH'].split(os.pathsep):
        for bname in bin_name:
            fil = os.path.join(path, bname)
            try:
                if os.lstat(fil):
                    return fil
            except OSError:
                pass

    raise SigverError('Cannot find {binary}'.format(binary=bin_name))
def _get_xmlsec_cryptobackend(path=None, search_paths=None, delete_tmpfiles=True):
    """
    Initialize a CryptoBackendXmlSec1 crypto backend.

    This function is now internal to this module.

    :param path: Explicit path to the xmlsec1 binary; looked up when None
    :param search_paths: Extra directories searched for the binary
    :param delete_tmpfiles: Whether temporary files are removed after use
    """
    if path is None:
        path = get_xmlsec_binary(paths=search_paths)
    return CryptoBackendXmlSec1(path, delete_tmpfiles=delete_tmpfiles)
# Node names handed to xmlsec1 when operating on (encrypted) assertions.
NODE_NAME = 'urn:oasis:names:tc:SAML:2.0:assertion:Assertion'
ENC_NODE_NAME = 'urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAssertion'
ENC_KEY_CLASS = 'EncryptedKey'
def _make_vals(val, klass, seccont, klass_inst=None, prop=None, part=False,
               base64encode=False, elements_to_sign=None):
    """
    Creates a class instance with a specified value, the specified
    class instance may be a value on a property in a defined class instance.

    :param val: The value
    :param klass: The value class
    :param seccont: The security context, forwarded to _instance()
    :param klass_inst: The class instance which has a property on which
        what this function returns is a value.
    :param prop: The property which the value should be assigned to.
    :param part: If the value is one of a possible list of values it should be
        handled slightly different compared to if it isn't.
    :param base64encode: Whether text values should be base64 encoded
    :param elements_to_sign: Accumulator of (class name, id) pairs for
        parts that carry a signature (filled in by _instance)
    :return: Value class instance
    """
    cinst = None

    if isinstance(val, dict):
        cinst = _instance(klass, val, seccont, base64encode=base64encode,
                          elements_to_sign=elements_to_sign)
    else:
        try:
            cinst = klass().set_text(val)
        except ValueError:
            # val was not a plain text value; assume it is iterable and
            # build one instance per element (only allowed at the top level).
            if not part:
                cis = [
                    _make_vals(
                        sval,
                        klass,
                        seccont,
                        klass_inst,
                        prop,
                        True,
                        base64encode,
                        elements_to_sign)
                    for sval in val
                ]
                setattr(klass_inst, prop, cis)
            else:
                raise

    if part:
        return cinst
    else:
        # Attach the single instance to the owning object as a list.
        if cinst:
            cis = [cinst]
            setattr(klass_inst, prop, cis)
def _instance(klass, ava, seccont, base64encode=False, elements_to_sign=None):
    """Instantiate *klass* from an attribute/value dictionary.

    :param klass: The SamlBase subclass to instantiate
    :param ava: Dictionary of attribute/child values
    :param seccont: The security context, forwarded to _make_vals()
    :param base64encode: Whether text values should be base64 encoded
    :param elements_to_sign: Accumulator of (class name, id) pairs for
        elements that request a signature
    :return: The populated class instance
    """
    instance = klass()

    # Plain XML attributes.
    for prop in instance.c_attributes.values():
        if prop in ava:
            if isinstance(ava[prop], bool):
                setattr(instance, prop, str(ava[prop]).encode())
            elif isinstance(ava[prop], int):
                setattr(instance, prop, str(ava[prop]))
            else:
                setattr(instance, prop, ava[prop])

    if 'text' in ava:
        instance.set_text(ava['text'], base64encode)

    # Declared child elements, possibly lists.
    for prop, klassdef in instance.c_children.values():
        if prop in ava:
            if isinstance(klassdef, list):
                # means there can be a list of values
                _make_vals(ava[prop], klassdef[0], seccont, instance, prop,
                           base64encode=base64encode,
                           elements_to_sign=elements_to_sign)
            else:
                cis = _make_vals(ava[prop], klassdef, seccont, instance, prop,
                                 True, base64encode, elements_to_sign)
                setattr(instance, prop, cis)

    if 'extension_elements' in ava:
        for item in ava['extension_elements']:
            instance.extension_elements.append(
                ExtensionElement(item['tag']).loadd(item))

    if 'extension_attributes' in ava:
        for key, val in ava['extension_attributes'].items():
            instance.extension_attributes[key] = val

    # Record that this element should be signed later.
    if 'signature' in ava:
        elements_to_sign.append((class_name(instance), instance.id))

    return instance
def signed_instance_factory(instance, seccont, elements_to_sign=None):
    """
    :param instance: The instance to be signed or not
    :param seccont: The security context
    :param elements_to_sign: Which parts if any that should be signed
    :return: A class instance if not signed otherwise a string
    """
    if not elements_to_sign:
        return instance

    signed_xml = instance if isinstance(instance, str) else instance.to_string()
    for node_name, nodeid in elements_to_sign:
        signed_xml = seccont.sign_statement(
            signed_xml, node_name=node_name, node_id=nodeid)
    return signed_xml
def make_temp(content, suffix="", decode=True, delete_tmpfiles=True):
    """
    Create a temporary file with the given content.

    This is needed by xmlsec in some cases where only strings exist when files
    are expected.

    :param content: The information to be placed in the file
    :param suffix: The temporary file might have to have a specific
        suffix in certain circumstances.
    :param decode: The input content might be base64 coded. If so it
        must, in some cases, be decoded before being placed in the file.
    :param delete_tmpfiles: Whether to keep the tmp files or delete them when
        they are no longer in use
    :return: An open NamedTemporaryFile positioned at offset 0; callers use
        its ``name`` and may close it when done.
    """
    if isinstance(content, bytes):
        content_encoded = content
    else:
        content_encoded = content.encode("utf-8")
    content_raw = base64.b64decode(content_encoded) if decode else content_encoded
    ntf = NamedTemporaryFile(suffix=suffix, delete=delete_tmpfiles)
    ntf.write(content_raw)
    ntf.seek(0)
    return ntf
def split_len(seq, length):
    """Split *seq* into consecutive chunks of at most *length* items."""
    return [seq[start:start + length] for start in range(0, len(seq), length)]
# Timestamp format emitted by M2Crypto/OpenSSL certificate fields.
M2_TIME_FORMAT = '%b %d %H:%M:%S %Y'


def to_time(_time):
    """Convert an OpenSSL timestamp string ending in ' GMT' to epoch seconds.

    NOTE: the precondition is checked with ``assert``, which disappears
    under ``python -O``.
    """
    assert _time.endswith(' GMT')
    _time = _time[:-4]
    return mktime(str_to_time(_time, M2_TIME_FORMAT))
def active_cert(key):
    """
    Verifies that a key is active that is present time is after not_before
    and before not_after.

    :param key: The Key (base64 certificate body, without PEM markers)
    :return: True if the key is active else False
    """
    try:
        cert_str = pem_format(key)
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
        # NOTE(review): assert statements are removed under ``python -O``;
        # both validity checks would then silently pass.
        assert cert.has_expired() == 0
        assert not OpenSSLWrapper().certificate_not_valid_yet(cert)
        return True
    except AssertionError:
        return False
    except AttributeError:
        return False
def cert_from_key_info(key_info, ignore_age=False):
    """ Get all X509 certs from a KeyInfo instance. Care is taken to make sure
    that the certs are continues sequences of bytes.

    All certificates appearing in an X509Data element MUST relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.

    :param key_info: The KeyInfo instance
    :param ignore_age: Skip the certificate validity-period check
    :return: A possibly empty list of certs
    """
    res = []
    for x509_data in key_info.x509_data:
        x509_certificate = x509_data.x509_certificate
        cert = x509_certificate.text.strip()
        # Re-wrap the base64 body into clean 64-character lines.
        cert = '\n'.join(split_len(''.join([s.strip() for s in
                                            cert.split()]), 64))
        if ignore_age or active_cert(cert):
            res.append(cert)
        else:
            logger.info('Inactive cert')
    return res
def cert_from_key_info_dict(key_info, ignore_age=False):
    """ Get all X509 certs from a KeyInfo dictionary. Care is taken to make sure
    that the certs are continues sequences of bytes.

    All certificates appearing in an X509Data element MUST relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.

    :param key_info: The KeyInfo dictionary
    :param ignore_age: Skip the certificate validity-period check
    :return: A possibly empty list of certs in their text representation
    """
    res = []
    if 'x509_data' not in key_info:
        return res
    for x509_data in key_info['x509_data']:
        x509_certificate = x509_data['x509_certificate']
        cert = x509_certificate['text'].strip()
        # Re-wrap the base64 body into clean 64-character lines.
        cert = '\n'.join(split_len(''.join(
            [s.strip() for s in cert.split()]), 64))
        if ignore_age or active_cert(cert):
            res.append(cert)
        else:
            logger.info('Inactive cert')
    return res
def cert_from_instance(instance):
    """Find certificates that are part of an instance.

    :param instance: An instance
    :return: possible empty list of certificates
    """
    signature = instance.signature
    if signature and signature.key_info:
        # Age is ignored here; the caller decides on validity.
        return cert_from_key_info(signature.key_info, ignore_age=True)
    return []
def extract_rsa_key_from_x509_cert(pem):
    """Return the public key object from a PEM-encoded X.509 certificate."""
    cert = saml2.cryptography.pki.load_pem_x509_certificate(pem)
    return cert.public_key()
def pem_format(key):
    """Wrap a base64 certificate body in PEM BEGIN/END markers.

    :param key: The base64 body (no markers)
    :return: ASCII bytes of the complete PEM block
    """
    pem = '-----BEGIN CERTIFICATE-----\n{key}\n-----END CERTIFICATE-----'.format(key=key)
    return pem.encode('ascii')
def import_rsa_key_from_file(filename):
    """Load a PEM-encoded private key (no passphrase) from *filename*."""
    data = read_file(filename, 'rb')
    key = saml2.cryptography.asymmetric.load_pem_private_key(data, None)
    return key
def parse_xmlsec_output(output):
    """ Parse the output from xmlsec to try to find out if the
    command was successfull or not.

    :param output: The output from Popen
    :return: A boolean; True if the command was a success otherwise
        an XmlsecError is raised
    """
    for line in output.splitlines():
        if line == 'OK':
            return True
        if line == 'FAIL':
            raise XmlsecError(output)
    # Neither verdict found: treat as failure.
    raise XmlsecError(output)
def sha1_digest(msg):
    """Return the raw SHA-1 digest (20 bytes) of *msg*."""
    hasher = hashlib.sha1(msg)
    return hasher.digest()
class Signer(object):
    """Abstract base class for signing algorithms."""

    def __init__(self, key):
        # Default key used when sign()/verify() are called without one.
        self.key = key

    def sign(self, msg, key):
        """Sign ``msg`` with ``key`` and return the signature."""
        raise NotImplementedError

    def verify(self, msg, sig, key):
        """Return True if ``sig`` is a valid signature for ``msg``."""
        raise NotImplementedError
class RSASigner(Signer):
    """RSA signer/verifier parameterized by a hash algorithm instance."""

    def __init__(self, digest, key=None):
        Signer.__init__(self, key)
        self.digest = digest

    def sign(self, msg, key=None):
        """Sign ``msg`` with ``key`` (or the default key)."""
        return saml2.cryptography.asymmetric.key_sign(key or self.key, msg, self.digest)

    def verify(self, msg, sig, key=None):
        """Verify ``sig`` over ``msg`` with ``key`` (or the default key)."""
        return saml2.cryptography.asymmetric.key_verify(key or self.key, sig, msg, self.digest)
# Supported XML signature-algorithm URIs mapped to pre-built RSASigner
# instances. NOTE: the signer instances are shared module-wide;
# RSACrypto.get_signer() assigns a key to them per use.
SIGNER_ALGS = {
    SIG_RSA_SHA1: RSASigner(saml2.cryptography.asymmetric.hashes.SHA1()),
    SIG_RSA_SHA224: RSASigner(saml2.cryptography.asymmetric.hashes.SHA224()),
    SIG_RSA_SHA256: RSASigner(saml2.cryptography.asymmetric.hashes.SHA256()),
    SIG_RSA_SHA384: RSASigner(saml2.cryptography.asymmetric.hashes.SHA384()),
    SIG_RSA_SHA512: RSASigner(saml2.cryptography.asymmetric.hashes.SHA512()),
}
# Order in which query-string parameters are concatenated when the
# signature of a redirect-bound request is built/verified
# (see verify_redirect_signature()).
REQ_ORDER = [
    'SAMLRequest',
    'RelayState',
    'SigAlg',
]

# Same parameter order, for redirect-bound responses.
RESP_ORDER = [
    'SAMLResponse',
    'RelayState',
    'SigAlg',
]
class RSACrypto(object):
    """Hands out Signer objects for the supported RSA signature algorithms,
    configured with a default signing/verification key."""

    def __init__(self, key):
        self.key = key

    def get_signer(self, sigalg, sigkey=None):
        """Return the signer for ``sigalg`` with ``sigkey`` (or the default
        key) attached, or None if the algorithm is unsupported."""
        signer = SIGNER_ALGS.get(sigalg)
        if signer is None:
            return None
        signer.key = sigkey if sigkey else self.key
        return signer
def verify_redirect_signature(saml_msg, crypto, cert=None, sigkey=None):
    """
    Verify the signature on a HTTP-Redirect bound SAML message.

    :param saml_msg: A dictionary with strings as values, *NOT* lists as
        produced by parse_qs.
    :param crypto: An object with a get_signer(sigalg, sigkey) method
        (e.g. RSACrypto)
    :param cert: A certificate to use when verifying the signature
    :param sigkey: A key to verify with, used when no cert is given
    :return: True, if signature verified
    """
    try:
        signer = crypto.get_signer(saml_msg['SigAlg'], sigkey)
    except KeyError:
        raise Unsupported('Signature algorithm: {alg}'.format(
            alg=saml_msg['SigAlg']))
    else:
        # NOTE(review): if SigAlg is present but unsupported this falls
        # through and returns None -- confirm callers treat None as falsy.
        if saml_msg['SigAlg'] in SIGNER_ALGS:
            if 'SAMLRequest' in saml_msg:
                _order = REQ_ORDER
            elif 'SAMLResponse' in saml_msg:
                _order = RESP_ORDER
            else:
                raise Unsupported(
                    'Verifying signature on something that should not be '
                    'signed')

            _args = saml_msg.copy()
            del _args['Signature']  # everything but the signature
            # Re-encode each parameter individually, in the binding's
            # mandated order, to reconstruct the exact signed octets.
            string = '&'.join(
                [parse.urlencode({k: _args[k]}) for k in _order if k in
                 _args]).encode('ascii')

            if cert:
                _key = extract_rsa_key_from_x509_cert(pem_format(cert))
            else:
                _key = sigkey
            _sign = base64.b64decode(saml_msg['Signature'])

            return bool(signer.verify(string, _sign, _key))
def make_str(txt):
    """Return ``txt`` as text, decoding it first if it is a bytes value."""
    return txt if isinstance(txt, six.string_types) else txt.decode()
def read_cert_from_file(cert_file, cert_type):
    """ Reads a certificate from a file. The assumption is that there is
    only one certificate in the file

    :param cert_file: The name of the file
    :param cert_type: The certificate type ('pem', 'der', 'cer' or 'crt')
    :return: A base64 encoded certificate as a string or the empty string
    :raises CertificateError: if a PEM file lacks BEGIN/END markers
    """
    if not cert_file:
        return ''

    if cert_type == 'pem':
        _a = read_file(cert_file, 'rb').decode()
        # Normalize line endings before splitting into lines.
        _b = _a.replace('\r\n', '\n')
        lines = _b.split('\n')

        # Drop everything up to and including the BEGIN marker line.
        for pattern in (
                '-----BEGIN CERTIFICATE-----',
                '-----BEGIN PUBLIC KEY-----'):
            if pattern in lines:
                lines = lines[lines.index(pattern) + 1:]
                break
        else:
            raise CertificateError('Strange beginning of PEM file')

        # Drop the END marker line and everything after it.
        for pattern in (
                '-----END CERTIFICATE-----',
                '-----END PUBLIC KEY-----'):
            if pattern in lines:
                lines = lines[:lines.index(pattern)]
                break
        else:
            raise CertificateError('Strange end of PEM file')
        # Join the base64 body into one string (already base64-encoded).
        return make_str(''.join(lines).encode())

    if cert_type in ['der', 'cer', 'crt']:
        data = read_file(cert_file, 'rb')
        _cert = base64.b64encode(data)
        return make_str(_cert)
    # NOTE(review): an unrecognized cert_type falls through and returns
    # None rather than raising -- confirm callers handle this.
class CryptoBackend(object):
    """Interface for XML crypto backends.

    Concrete backends implement encryption, decryption, signing and
    signature validation of XML documents.
    """

    def version(self):
        """Return the version of the underlying crypto implementation."""
        raise NotImplementedError()

    def encrypt(self, text, recv_key, template, key_type):
        """Encrypt ``text`` for the holder of ``recv_key``."""
        raise NotImplementedError()

    def encrypt_assertion(self, statement, enc_key, template, key_type, node_xpath):
        """Encrypt the assertion contained in ``statement``."""
        raise NotImplementedError()

    def decrypt(self, enctext, key_file, id_attr):
        """Decrypt ``enctext`` with the private key in ``key_file``."""
        raise NotImplementedError()

    def sign_statement(self, statement, node_name, key_file, node_id, id_attr):
        """Sign ``statement`` with the private key in ``key_file``."""
        raise NotImplementedError()

    def validate_signature(self, enctext, cert_file, cert_type, node_name, node_id, id_attr):
        """Validate the signature on ``enctext`` against ``cert_file``."""
        raise NotImplementedError()
# XPath matching Response/EncryptedAssertion/Assertion regardless of
# namespace; used as the default node to encrypt in encrypt_assertion().
ASSERT_XPATH = ''.join([
    '/*[local-name()=\'{name}\']'.format(name=n)
    for n in ['Response', 'EncryptedAssertion', 'Assertion']
])
class CryptoBackendXmlSec1(CryptoBackend):
    """
    CryptoBackend implementation using the external xmlsec1 binary to
    sign and verify XML documents.
    """

    __DEBUG = 0

    def __init__(self, xmlsec_binary, delete_tmpfiles=True, **kwargs):
        """
        :param xmlsec_binary: Path to the xmlsec1 executable
        :param delete_tmpfiles: Whether temporary files handed to xmlsec1
            are removed when no longer referenced
        """
        CryptoBackend.__init__(self, **kwargs)
        assert (isinstance(xmlsec_binary, six.string_types))
        self.xmlsec = xmlsec_binary
        self.delete_tmpfiles = delete_tmpfiles
        try:
            # Optional backend for non-XML (redirect binding) signatures.
            self.non_xml_crypto = RSACrypto(kwargs['rsa_key'])
        except KeyError:
            pass

    def version(self):
        """Return the version string reported by the xmlsec1 binary."""
        com_list = [self.xmlsec, '--version']
        pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
        content, _ = pof.communicate()
        content = content.decode('ascii')
        try:
            # Output looks like 'xmlsec1 1.2.x ...'.
            return content.split(' ')[1]
        except IndexError:
            return ''

    def encrypt(self, text, recv_key, template, session_key_type, xpath=''):
        """
        Encrypt a document (or part of it) with xmlsec1.

        :param text: The text to be encrypted
        :param recv_key: Filename of a file where the key resides
        :param template: Filename of a file with the pre-encryption part
        :param session_key_type: Type and size of a new session key
            'des-192' generates a new 192 bits DES key for DES3 encryption
        :param xpath: What should be encrypted
        :return: The encrypted document
        :raises EncryptError: if xmlsec1 fails
        """
        logger.debug('Encryption input len: %d', len(text))
        tmp = make_temp(text, decode=False, delete_tmpfiles=self.delete_tmpfiles)
        com_list = [
            self.xmlsec,
            '--encrypt',
            '--pubkey-cert-pem', recv_key,
            '--session-key', session_key_type,
            '--xml-data', tmp.name,
        ]
        if xpath:
            com_list.extend(['--node-xpath', xpath])

        try:
            (_stdout, _stderr, output) = self._run_xmlsec(com_list, [template])
        except XmlsecError as e:
            six.raise_from(EncryptError(com_list), e)

        return output

    def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None, node_id=None):
        """
        Will encrypt an assertion

        :param statement: A XML document that contains the assertion to encrypt
        :param enc_key: File name of a file containing the encryption key
        :param template: A template for the encryption part to be added.
        :param key_type: The type of session key to use.
        :param node_xpath: XPath of the node to encrypt; defaults to
            ASSERT_XPATH if not given
        :param node_id: Optional id of the node to encrypt
        :return: The encrypted text
        :raises EncryptError: if xmlsec1 fails
        """
        if six.PY2:
            _str = unicode
        else:
            _str = str

        if isinstance(statement, SamlBase):
            statement = pre_encrypt_assertion(statement)

        tmp = make_temp(_str(statement),
                        decode=False,
                        delete_tmpfiles=self.delete_tmpfiles)
        tmp2 = make_temp(_str(template),
                         decode=False,
                         delete_tmpfiles=self.delete_tmpfiles)

        if not node_xpath:
            node_xpath = ASSERT_XPATH

        com_list = [
            self.xmlsec,
            '--encrypt',
            '--pubkey-cert-pem', enc_key,
            '--session-key', key_type,
            '--xml-data', tmp.name,
            '--node-xpath', node_xpath,
        ]
        if node_id:
            com_list.extend(['--node-id', node_id])

        try:
            (_stdout, _stderr, output) = self._run_xmlsec(com_list, [tmp2.name])
        except XmlsecError as e:
            six.raise_from(EncryptError(com_list), e)

        return output.decode('utf-8')

    def decrypt(self, enctext, key_file, id_attr):
        """
        Decrypt an encrypted part of an XML document.

        :param enctext: XML document containing an encrypted part
        :param key_file: The key to use for the decryption
        :param id_attr: The attribute name for the identifier
        :return: The decrypted document
        :raises DecryptError: if xmlsec1 fails
        """
        logger.debug('Decrypt input len: %d', len(enctext))
        tmp = make_temp(enctext, decode=False, delete_tmpfiles=self.delete_tmpfiles)
        com_list = [
            self.xmlsec,
            '--decrypt',
            '--privkey-pem', key_file,
            '--id-attr:{id_attr}'.format(id_attr=id_attr),
            ENC_KEY_CLASS,
        ]

        try:
            (_stdout, _stderr, output) = self._run_xmlsec(com_list, [tmp.name])
        except XmlsecError as e:
            six.raise_from(DecryptError(com_list), e)

        return output.decode('utf-8')

    def sign_statement(self, statement, node_name, key_file, node_id, id_attr):
        """
        Sign an XML statement.

        :param statement: The statement to be signed
        :param node_name: string like 'urn:oasis:names:...:Assertion'
        :param key_file: The file where the key can be found
        :param node_id:
        :param id_attr: The attribute name for the identifier, normally one of
            'id','Id' or 'ID'
        :return: The signed statement
        :raises SignatureError: if xmlsec1 fails or produces no output
        """
        if isinstance(statement, SamlBase):
            statement = str(statement)

        tmp = make_temp(statement,
                        suffix=".xml",
                        decode=False,
                        delete_tmpfiles=self.delete_tmpfiles)

        com_list = [
            self.xmlsec,
            '--sign',
            '--privkey-pem', key_file,
            '--id-attr:{id_attr_name}'.format(id_attr_name=id_attr),
            node_name,
        ]
        if node_id:
            com_list.extend(['--node-id', node_id])

        try:
            (stdout, stderr, output) = self._run_xmlsec(com_list, [tmp.name])
        except XmlsecError as e:
            # Chain the underlying xmlsec error as the cause, consistent
            # with the other methods of this backend (was a bare raise
            # that dropped the cause and left 'e' unused).
            six.raise_from(SignatureError(com_list), e)

        # this does not work if --store-signatures is used
        if output:
            return output.decode("utf-8")
        if stdout:
            return stdout.decode("utf-8")
        raise SignatureError(stderr)

    def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id, id_attr):
        """
        Validate signature on XML document.

        :param signedtext: The XML document as a string
        :param cert_file: The public key that was used to sign the document
        :param cert_type: The file type of the certificate
        :param node_name: The name of the class that is signed
        :param node_id: The identifier of the node
        :param id_attr: The attribute name for the identifier, normally one of
            'id','Id' or 'ID'
        :return: Boolean True if the signature was correct otherwise False.
        :raises SignatureError: if xmlsec1 could not be run
        """
        if not isinstance(signedtext, six.binary_type):
            signedtext = signedtext.encode('utf-8')

        tmp = make_temp(signedtext,
                        suffix=".xml",
                        decode=False,
                        delete_tmpfiles=self.delete_tmpfiles)

        com_list = [
            self.xmlsec,
            '--verify',
            '--enabled-reference-uris', 'empty,same-doc',
            '--pubkey-cert-{type}'.format(type=cert_type), cert_file,
            '--id-attr:{id_attr_name}'.format(id_attr_name=id_attr),
            node_name,
        ]
        if node_id:
            com_list.extend(['--node-id', node_id])

        try:
            (_stdout, stderr, _output) = self._run_xmlsec(com_list, [tmp.name])
        except XmlsecError as e:
            six.raise_from(SignatureError(com_list), e)

        # xmlsec1 reports the OK/FAIL verdict on stderr.
        return parse_xmlsec_output(stderr)

    def _run_xmlsec(self, com_list, extra_args):
        """
        Common code to invoke xmlsec and parse the output.

        :param com_list: Key-value parameter list for xmlsec
        :param extra_args: Positional parameters to be appended after all
            key-value parameters
        :result: Whatever xmlsec wrote to an --output temporary file
        :raises XmlsecError: if xmlsec1 exits with a non-zero return code
        """
        with NamedTemporaryFile(suffix='.xml') as ntf:
            com_list.extend(['--output', ntf.name])
            com_list += extra_args

            logger.debug('xmlsec command: %s', ' '.join(com_list))

            pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
            p_out, p_err = pof.communicate()
            p_out = p_out.decode()
            p_err = p_err.decode()

            if pof.returncode != 0:
                errmsg = "returncode={code}\nerror={err}\noutput={out}".format(
                    code=pof.returncode, err=p_err, out=p_out
                )
                logger.error(errmsg)
                raise XmlsecError(errmsg)

            ntf.seek(0)
            return p_out, p_err, ntf.read()
class CryptoBackendXMLSecurity(CryptoBackend):
    """
    CryptoBackend implementation using pyXMLSecurity to sign and verify
    XML documents.

    Encrypt and decrypt is currently unsupported by pyXMLSecurity.

    pyXMLSecurity uses lxml (libxml2) to parse XML data, but otherwise
    tries to get by with native Python code. It does native Python RSA
    signatures, or alternatively PyKCS11 to offload cryptographic work
    to an external PKCS#11 module.
    """

    def __init__(self):
        CryptoBackend.__init__(self)

    def version(self):
        # pyXMLSecurity does not expose a version attribute, so report a
        # static placeholder.
        return 'XMLSecurity 0.0'

    def sign_statement(self, statement, node_name, key_file, node_id, id_attr):
        """
        Sign an XML statement.

        The parameters actually used in this CryptoBackend
        implementation are :

        :param statement: XML as string
        :param key_file: xmlsec key_spec string(), filename,
            'pkcs11://' URI or PEM data
        :returns: Signed XML as string
        """
        import lxml.etree
        import xmlsec

        parsed = xmlsec.parse_xml(statement)
        signed_doc = xmlsec.sign(parsed, key_file)
        result = lxml.etree.tostring(signed_doc, xml_declaration=False, encoding="UTF-8")
        if not isinstance(result, six.string_types):
            result = result.decode("utf-8")
        return result

    def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id, id_attr):
        """
        Validate signature on XML document.

        The parameters actually used in this CryptoBackend
        implementation are :

        :param signedtext: The signed XML data as string
        :param cert_file: xmlsec key_spec string(), filename,
            'pkcs11://' URI or PEM data
        :param cert_type: string, must be 'pem' for now
        :returns: True on successful validation, False otherwise
        """
        if cert_type != 'pem':
            raise Unsupported('Only PEM certs supported here')

        import xmlsec

        parsed = xmlsec.parse_xml(signedtext)
        try:
            return xmlsec.verify(parsed, cert_file)
        except xmlsec.XMLSigException:
            return False
def security_context(conf):
    """ Creates a security context based on the configuration

    :param conf: The configuration, this is a Config instance
    :return: A SecurityContext instance, or None if no configuration given
    :raises SigverError: if the crypto backend is unknown or the xmlsec
        binary cannot be found
    """
    if not conf:
        return None

    try:
        metadata = conf.metadata
    except AttributeError:
        metadata = None

    try:
        id_attr = conf.id_attr_name
    except AttributeError:
        id_attr = None

    sec_backend = None

    if conf.crypto_backend == 'xmlsec1':
        xmlsec_binary = conf.xmlsec_binary

        if not xmlsec_binary:
            try:
                _path = conf.xmlsec_path
            except AttributeError:
                _path = []
            xmlsec_binary = get_xmlsec_binary(_path)

        # verify that xmlsec is where it's supposed to be
        if not os.path.exists(xmlsec_binary):
            # if not os.access(, os.F_OK):
            err_msg = 'xmlsec binary not found: {binary}'
            err_msg = err_msg.format(binary=xmlsec_binary)
            raise SigverError(err_msg)

        crypto = _get_xmlsec_cryptobackend(xmlsec_binary,
                                           delete_tmpfiles=conf.delete_tmpfiles)

        # If a signing key is configured, also set up an RSA backend for
        # non-XML (redirect binding) signatures.
        _file_name = conf.getattr('key_file', '')
        if _file_name:
            try:
                rsa_key = import_rsa_key_from_file(_file_name)
            except Exception as err:
                logger.error('Cannot import key from {file}: {err_msg}'.format(
                    file=_file_name, err_msg=err))
                raise
            else:
                sec_backend = RSACrypto(rsa_key)
    elif conf.crypto_backend == 'XMLSecurity':
        # new and somewhat untested pyXMLSecurity crypto backend.
        crypto = CryptoBackendXMLSecurity()
    else:
        err_msg = 'Unknown crypto_backend {backend}'
        err_msg = err_msg.format(backend=conf.crypto_backend)
        raise SigverError(err_msg)

    # Collect the private keys configured for decryption.
    enc_key_files = []
    if conf.encryption_keypairs is not None:
        for _encryption_keypair in conf.encryption_keypairs:
            if 'key_file' in _encryption_keypair:
                enc_key_files.append(_encryption_keypair['key_file'])

    return SecurityContext(
        crypto,
        conf.key_file,
        cert_file=conf.cert_file,
        metadata=metadata,
        only_use_keys_in_metadata=conf.only_use_keys_in_metadata,
        cert_handler_extra_class=conf.cert_handler_extra_class,
        generate_cert_info=conf.generate_cert_info,
        tmp_cert_file=conf.tmp_cert_file,
        tmp_key_file=conf.tmp_key_file,
        validate_certificate=conf.validate_certificate,
        enc_key_files=enc_key_files,
        encryption_keypairs=conf.encryption_keypairs,
        sec_backend=sec_backend,
        id_attr=id_attr,
        delete_tmpfiles=conf.delete_tmpfiles)
def encrypt_cert_from_item(item):
    """Extract the encryption certificate advertised in an item's extensions.

    Looks for an SPCertEnc extension element carrying X509 data and returns
    the first certificate found, wrapped in PEM BEGIN/END markers.

    :param item: An element carrying extensions (e.g. an AuthnRequest)
    :return: A PEM-formatted certificate string, or None if none was found
    """
    _encrypt_cert = None
    try:
        try:
            _elem = extension_elements_to_elements(
                item.extensions.extension_elements, [pefim, ds])
        except Exception:
            # Fall back to the raw extension-element children.
            # (Was a bare 'except:' which would also swallow
            # SystemExit/KeyboardInterrupt.)
            _elem = extension_elements_to_elements(
                item.extension_elements[0].children,
                [pefim, ds])

        for _tmp_elem in _elem:
            if isinstance(_tmp_elem, SPCertEnc):
                for _tmp_key_info in _tmp_elem.key_info:
                    if (_tmp_key_info.x509_data is not None
                            and len(_tmp_key_info.x509_data) > 0):
                        _encrypt_cert = _tmp_key_info.x509_data[
                            0].x509_certificate.text
                        break
    except Exception:
        # Best effort: any failure means no certificate was supplied.
        pass

    if _encrypt_cert is not None:
        if _encrypt_cert.find('-----BEGIN CERTIFICATE-----\n') == -1:
            _encrypt_cert = '-----BEGIN CERTIFICATE-----\n' + _encrypt_cert
        if _encrypt_cert.find('\n-----END CERTIFICATE-----') == -1:
            _encrypt_cert = _encrypt_cert + '\n-----END CERTIFICATE-----'
    return _encrypt_cert
class CertHandlerExtra(object):
    """Interface for plugging in custom certificate generation and
    validation; subclasses override the four methods below."""

    def __init__(self):
        pass

    def use_generate_cert_func(self):
        raise Exception('use_generate_cert_func function must be implemented')

    def generate_cert(self, generate_cert_info, root_cert_string, root_key_string):
        # Implementations are expected to return (cert_string, key_string).
        raise Exception('generate_cert function must be implemented')

    def use_validate_cert_func(self):
        raise Exception('use_validate_cert_func function must be implemented')

    def validate_cert(self, cert_str, root_cert_string, root_key_string):
        # Implementations are expected to return True/False.
        raise Exception('validate_cert function must be implemented')
class CertHandler(object):
    def __init__(
            self,
            security_context,
            cert_file=None, cert_type='pem',
            key_file=None, key_type='pem',
            generate_cert_info=None,
            cert_handler_extra_class=None,
            tmp_cert_file=None,
            tmp_key_file=None,
            verify_cert=False):
        """
        Initiates the class for handling certificates. Enables the certificates
        to either be a single certificate as base functionality or makes it
        possible to generate a new certificate for each call to the function.

        :param security_context: SecurityContext whose key/cert files are
            repointed when update_cert() generates a new pair
        :param cert_file: Path to our certificate
        :param cert_type: Certificate file type (handling only active for 'pem')
        :param key_file: Path to our private key
        :param key_type: Key file type (handling only active for 'pem')
        :param generate_cert_info: Info used when generating certificates
        :param cert_handler_extra_class: Optional CertHandlerExtra subclass
            overriding generation/validation
        :param tmp_cert_file: File to write generated certificates to
        :param tmp_key_file: File to write generated keys to
        :param verify_cert: Whether incoming certificates are verified
        """
        self._verify_cert = False
        self._generate_cert = False
        # This cert do not have to be valid, it is just the last cert to be
        # validated.
        self._last_cert_verified = None
        self._last_validated_cert = None
        # All certificate handling below is only wired up for PEM files.
        if cert_type == 'pem' and key_type == 'pem':
            self._verify_cert = verify_cert is True
            self._security_context = security_context
            self._osw = OpenSSLWrapper()
            if key_file and os.path.isfile(key_file):
                self._key_str = self._osw.read_str_from_file(key_file, key_type)
            else:
                self._key_str = ''
            if cert_file and os.path.isfile(cert_file):
                self._cert_str = self._osw.read_str_from_file(cert_file,
                                                              cert_type)
            else:
                self._cert_str = ''

            self._tmp_cert_str = self._cert_str
            self._tmp_key_str = self._key_str
            self._tmp_cert_file = tmp_cert_file
            self._tmp_key_file = tmp_key_file

            self._cert_info = None
            self._generate_cert_func_active = False
            # Per-call certificate generation requires a base cert/key pair
            # and target files to write the generated pair to.
            if generate_cert_info is not None \
                    and len(self._cert_str) > 0 \
                    and len(self._key_str) > 0 \
                    and tmp_key_file is not None \
                    and tmp_cert_file is not None:
                self._generate_cert = True
                self._cert_info = generate_cert_info
                self._cert_handler_extra_class = cert_handler_extra_class

    def verify_cert(self, cert_file):
        # Verify cert_file against our own certificate; returns True
        # unconditionally when verification is disabled.
        if self._verify_cert:
            if cert_file and os.path.isfile(cert_file):
                cert_str = self._osw.read_str_from_file(cert_file, 'pem')
            else:
                return False
            self._last_validated_cert = cert_str
            if self._cert_handler_extra_class is not None and \
                    self._cert_handler_extra_class.use_validate_cert_func():
                # NOTE(review): the extra-class result is discarded and the
                # method falls through to 'return True' -- confirm intended.
                self._cert_handler_extra_class.validate_cert(
                    cert_str, self._cert_str, self._key_str)
            else:
                valid, mess = self._osw.verify(self._cert_str, cert_str)
                logger.info('CertHandler.verify_cert: %s', mess)
                return valid
        return True

    def generate_cert(self):
        # Whether a new certificate is generated per call (see __init__).
        return self._generate_cert

    def update_cert(self, active=False, client_crt=None):
        # Generate (or adopt) a certificate and point the security
        # context's key/cert files at the new pair.
        if (self._generate_cert and active) or client_crt is not None:
            if client_crt is not None:
                self._tmp_cert_str = client_crt
                # No private key for signing
                self._tmp_key_str = ''
            elif self._cert_handler_extra_class is not None and \
                    self._cert_handler_extra_class.use_generate_cert_func():
                (self._tmp_cert_str, self._tmp_key_str) = \
                    self._cert_handler_extra_class.generate_cert(
                        self._cert_info, self._cert_str, self._key_str)
            else:
                # Create a CSR-based certificate and sign it with our own.
                self._tmp_cert_str, self._tmp_key_str = self._osw \
                    .create_certificate(self._cert_info, request=True)
                self._tmp_cert_str = self._osw.create_cert_signed_certificate(
                    self._cert_str, self._key_str, self._tmp_cert_str)
                valid, mess = self._osw.verify(self._cert_str,
                                               self._tmp_cert_str)
            self._osw.write_str_to_file(self._tmp_cert_file, self._tmp_cert_str)
            self._osw.write_str_to_file(self._tmp_key_file, self._tmp_key_str)
            self._security_context.key_file = self._tmp_key_file
            self._security_context.cert_file = self._tmp_cert_file
            self._security_context.key_type = 'pem'
            self._security_context.cert_type = 'pem'
            self._security_context.my_cert = read_cert_from_file(
                self._security_context.cert_file,
                self._security_context.cert_type)
class SecurityContext(object):
    # Default attribute name used to locate a signed node's identifier.
    DEFAULT_ID_ATTR_NAME = 'ID'

    # Our own certificate, read from cert_file in __init__.
    my_cert = None
def __init__(
        self,
        crypto,
        key_file='', key_type='pem',
        cert_file='', cert_type='pem',
        metadata=None,
        template='',
        encrypt_key_type='des-192',
        only_use_keys_in_metadata=False,
        cert_handler_extra_class=None,
        generate_cert_info=None,
        tmp_cert_file=None, tmp_key_file=None,
        validate_certificate=None,
        enc_key_files=None, enc_key_type='pem',
        encryption_keypairs=None,
        enc_cert_type='pem',
        sec_backend=None,
        id_attr='',
        delete_tmpfiles=True):
    """Set up a security context.

    :param crypto: A CryptoBackend instance doing the XML crypto work
    :param key_file: Our private key for signing
    :param cert_file: Our public certificate for signing
    :param metadata: Metadata holding keys of other entities
    :param template: XML encryption template file; a bundled default is
        used if not given
    :param sec_backend: Optional RSACrypto backend for redirect-binding
        signatures
    :param id_attr: The attribute name for the identifier; defaults to
        DEFAULT_ID_ATTR_NAME
    :param delete_tmpfiles: Whether temporary files are cleaned up
    """
    self.id_attr = id_attr or SecurityContext.DEFAULT_ID_ATTR_NAME

    self.crypto = crypto
    assert (isinstance(self.crypto, CryptoBackend))

    if sec_backend:
        assert (isinstance(sec_backend, RSACrypto))
    self.sec_backend = sec_backend

    # Your private key for signing
    self.key_file = key_file
    self.key_type = key_type

    # Your public key for signing
    self.cert_file = cert_file
    self.cert_type = cert_type

    # Your private key for encryption
    self.enc_key_files = enc_key_files
    self.enc_key_type = enc_key_type

    # Your public key for encryption
    self.encryption_keypairs = encryption_keypairs
    self.enc_cert_type = enc_cert_type

    self.my_cert = read_cert_from_file(cert_file, cert_type)

    self.cert_handler = CertHandler(
        self,
        cert_file, cert_type,
        key_file, key_type,
        generate_cert_info,
        cert_handler_extra_class,
        tmp_cert_file,
        tmp_key_file,
        validate_certificate)

    self.cert_handler.update_cert(True)

    self.metadata = metadata
    self.only_use_keys_in_metadata = only_use_keys_in_metadata

    # Default to the encryption template bundled next to this module.
    if not template:
        this_dir, this_filename = os.path.split(__file__)
        self.template = os.path.join(this_dir, 'xml_template', 'template.xml')
    else:
        self.template = template

    self.encrypt_key_type = encrypt_key_type

    self.delete_tmpfiles = delete_tmpfiles
def correctly_signed(self, xml, must=False):
    """Check that the response in ``xml`` carries a correct signature.

    :param xml: The SAML response as a string
    :param must: Whether a signature is required
    :return: The parsed response if the signature checks out
    """
    logger.debug('verify correct signature')
    return self.correctly_signed_response(xml, must)
def encrypt(self, text, recv_key='', template='', key_type=''):
    """Encrypt ``text`` using the configured crypto backend.

    Equivalent to::

        xmlsec encrypt --pubkey-pem pub-userkey.pem
            --session-key aes128-cbc --xml-data doc-plain.xml
            --output doc-encrypted.xml session-key-template.xml

    :param text: Text to encrypt
    :param recv_key: A file containing the receivers public key
    :param template: A file containing the XMLSEC template
    :param key_type: The type of session key to use
    :result: An encrypted XML text
    """
    session_key_type = key_type or self.encrypt_key_type
    xmlsec_template = template or self.template
    return self.crypto.encrypt(text, recv_key, xmlsec_template, session_key_type)
def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None):
    """Encrypt the assertion in ``statement`` via the crypto backend.

    :param statement: A XML document that contains the assertion to encrypt
    :param enc_key: File name of a file containing the encryption key
    :param template: A template for the encryption part to be added.
    :param key_type: The type of session key to use.
    :param node_xpath: XPath of the node to encrypt; backend default if None
    :return: The encrypted text
    """
    return self.crypto.encrypt_assertion(
        statement,
        enc_key,
        template,
        key_type,
        node_xpath,
    )
def decrypt_keys(self, enctext, keys=None, id_attr=''):
    """ Decrypting an encrypted text by the use of a private key.

    :param enctext: The encrypted text as a string
    :param keys: Keys (PEM strings) to try to decrypt enctext with
    :param id_attr: The attribute name for the identifier, normally one of
        'id','Id' or 'ID'
    :return: The decrypted text
    :raises DecryptError: (from decrypt) if no key could decrypt the text
    """
    if not isinstance(keys, list):
        keys = [keys]

    # Drop empty entries and ensure every key is bytes before writing it
    # to a temporary PEM file for the crypto backend.
    keys_encoded = [
        key if isinstance(key, six.binary_type) else key.encode("ascii")
        for key in keys
        if key
    ]
    # Keep the temp-file objects referenced so the files stay alive for
    # the duration of the decrypt call.
    key_files = [
        make_temp(key, decode=False, delete_tmpfiles=self.delete_tmpfiles)
        for key in keys_encoded
    ]
    key_file_names = [tmp.name for tmp in key_files]

    # decrypt() raises DecryptError itself when no key works; the former
    # try/except-reraise wrapper around it was a no-op and is gone.
    return self.decrypt(enctext, key_file=key_file_names, id_attr=id_attr)
def decrypt(self, enctext, key_file=None, id_attr=''):
    """ Decrypting an encrypted text by the use of a private key.

    :param enctext: The encrypted text as a string
    :param key_file: A key file (or list of key files) to try first,
        before the context's configured decryption keys
    :param id_attr: The attribute name for the identifier
    :return: The decrypted text
    :raises DecryptError: if no key could decrypt the text
    """
    if not id_attr:
        id_attr = self.id_attr

    if not isinstance(key_file, list):
        key_file = [key_file]

    # Candidate keys: explicit ones first, then the configured ones.
    candidates = [
        f for f in itertools.chain(key_file, self.enc_key_files) if f
    ]
    for candidate in candidates:
        try:
            dectext = self.crypto.decrypt(enctext, candidate, id_attr)
        except XmlsecError:
            continue
        if dectext:
            return dectext

    errmsg = "No key was able to decrypt the ciphertext. Keys tried: {keys}"
    errmsg = errmsg.format(keys=candidates)
    raise DecryptError(errmsg)
def verify_signature(self, signedtext, cert_file=None, cert_type='pem', node_name=NODE_NAME, node_id=None, id_attr=''):
    """ Verifies the signature of a XML document.

    :param signedtext: The XML document as a string
    :param cert_file: The public key that was used to sign the document
    :param cert_type: The file type of the certificate
    :param node_name: The name of the class that is signed
    :param node_id: The identifier of the node
    :param id_attr: The attribute name for the identifier, normally one of
        'id','Id' or 'ID'
    :return: Boolean True if the signature was correct otherwise False.
    """
    # Fall back to our own certificate -- only meaningful in test setups
    # where the document was signed with our key.
    if not cert_file:
        cert_file = self.cert_file
        cert_type = self.cert_type

    id_attr = id_attr or self.id_attr

    return self.crypto.validate_signature(
        signedtext,
        cert_file=cert_file,
        cert_type=cert_type,
        node_name=node_name,
        node_id=node_id,
        id_attr=id_attr,
    )
def _check_signature(self, decoded_xml, item, node_name=NODE_NAME, origdoc=None, id_attr='', must=False, only_valid_cert=False, issuer=None):
    """Verify the signature on ``item`` using certificates from metadata
    or, failing that, certificates embedded in the document itself.

    :param decoded_xml: The XML document the signature covers
    :param item: The parsed entity whose signature is checked
    :param node_name: The name of the signed node/class/element
    :param origdoc: The original XML string (unused here)
    :param id_attr: The attribute name for the identifier
    :param must: Whether a signature is required (unused here)
    :param only_valid_cert: Accept the item if the certificate validates,
        even when no certificate verified the signature
    :param issuer: Issuer element used if the item carries none
    :return: ``item`` on success
    :raises MissingKey: if no certificate could be found for the issuer
    :raises SignatureError: if no certificate verified the signature
    :raises CertificateError: if the verifying certificate is invalid
    """
    try:
        _issuer = item.issuer.text.strip()
    except AttributeError:
        _issuer = None

    if _issuer is None:
        try:
            _issuer = issuer.text.strip()
        except AttributeError:
            _issuer = None

    # More trust in certs from metadata than certs in the XML document
    if self.metadata:
        try:
            _certs = self.metadata.certs(_issuer, 'any', 'signing')
        except KeyError:
            _certs = []
        certs = []
        for cert in _certs:
            if isinstance(cert, six.string_types):
                # Write PEM-wrapped cert strings to temp files for xmlsec.
                content = pem_format(cert)
                tmp = make_temp(content,
                                suffix=".pem",
                                decode=False,
                                delete_tmpfiles=self.delete_tmpfiles)
                certs.append(tmp)
            else:
                certs.append(cert)
    else:
        certs = []

    if not certs and not self.only_use_keys_in_metadata:
        logger.debug('==== Certs from instance ====')
        certs = [
            make_temp(content=pem_format(cert),
                      suffix=".pem",
                      decode=False,
                      delete_tmpfiles=self.delete_tmpfiles)
            for cert in cert_from_instance(item)
        ]
    else:
        logger.debug('==== Certs from metadata ==== %s: %s ====', _issuer, certs)

    if not certs:
        raise MissingKey(_issuer)

    # Try each candidate certificate until one verifies the signature.
    verified = False
    last_pem_file = None
    for pem_fd in certs:
        try:
            last_pem_file = pem_fd.name
            if self.verify_signature(
                    decoded_xml,
                    pem_fd.name,
                    node_name=node_name,
                    node_id=item.id,
                    id_attr=id_attr):
                verified = True
                break
        except XmlsecError as exc:
            # xmlsec failure for this cert: try the next one.
            logger.error('check_sig: %s', exc)
            pass
        except Exception as exc:
            logger.error('check_sig: %s', exc)
            raise

    if verified or only_valid_cert:
        if not self.cert_handler.verify_cert(last_pem_file):
            raise CertificateError('Invalid certificate!')
    else:
        raise SignatureError('Failed to verify signature')

    return item
def check_signature(self, item, node_name=NODE_NAME, origdoc=None, id_attr='', must=False, issuer=None):
    """Verify the signature on a parsed SAML entity.

    :param item: Parsed entity
    :param node_name: The name of the node/class/element that is signed
    :param origdoc: The original XML string
    :param id_attr: The attribute name for the identifier, normally one of
        'id','Id' or 'ID'
    :param must: Whether a signature is required
    :param issuer: The issuer element, if known
    :return: The verified entity
    """
    return self._check_signature(
        origdoc,
        item,
        node_name,
        origdoc,
        id_attr=id_attr,
        must=must,
        issuer=issuer,
    )
def correctly_signed_message(self, decoded_xml, msgtype, must=False, origdoc=None, only_valid_cert=False):
    """Check if a request is correctly signed, if we have metadata for
    the entity that sent the info use that, if not use the key that are in
    the message if any.

    :param decoded_xml: The SAML message as an XML infoset (a string)
    :param msgtype: SAML protocol message type
    :param must: Whether there must be a signature
    :param origdoc: The original XML string
    :param only_valid_cert: Accept a valid certificate even if the
        signature could not be verified
    :return: The parsed message
    :raises TypeError: if decoded_xml does not parse as msgtype
    :raises SignatureError: if a required signature is missing
    """
    # Resolve the parser function for this message type, e.g.
    # saml.assertion_from_string or samlp.logout_request_from_string.
    attr = '{type}_from_string'.format(type=msgtype)
    _func = getattr(saml, attr, None)
    _func = getattr(samlp, attr, _func)

    msg = _func(decoded_xml)
    if not msg:
        raise TypeError('Not a {type}'.format(type=msgtype))

    if not msg.signature:
        if must:
            err_msg = 'Required signature missing on {type}'
            err_msg = err_msg.format(type=msgtype)
            raise SignatureError(err_msg)
        else:
            return msg

    return self._check_signature(
        decoded_xml,
        msg,
        class_name(msg),
        origdoc,
        must=must,
        only_valid_cert=only_valid_cert)
# Convenience wrappers around correctly_signed_message() -- one per SAML
# protocol message type. Extra **kwargs are accepted and ignored.
def correctly_signed_authn_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on an AuthnRequest."""
    return self.correctly_signed_message(decoded_xml, 'authn_request', must, origdoc, only_valid_cert=only_valid_cert)

def correctly_signed_authn_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on an AuthnQuery."""
    return self.correctly_signed_message(decoded_xml, 'authn_query', must, origdoc, only_valid_cert)

def correctly_signed_logout_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on a LogoutRequest."""
    return self.correctly_signed_message(decoded_xml, 'logout_request', must, origdoc, only_valid_cert)

def correctly_signed_logout_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on a LogoutResponse."""
    return self.correctly_signed_message(decoded_xml, 'logout_response', must, origdoc, only_valid_cert)

def correctly_signed_attribute_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on an AttributeQuery."""
    return self.correctly_signed_message(decoded_xml, 'attribute_query', must, origdoc, only_valid_cert)

def correctly_signed_authz_decision_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on an AuthzDecisionQuery."""
    return self.correctly_signed_message(decoded_xml, 'authz_decision_query', must, origdoc, only_valid_cert)

def correctly_signed_authz_decision_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on an AuthzDecisionResponse."""
    return self.correctly_signed_message(decoded_xml, 'authz_decision_response', must, origdoc, only_valid_cert)

def correctly_signed_name_id_mapping_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on a NameIDMappingRequest."""
    return self.correctly_signed_message(decoded_xml, 'name_id_mapping_request', must, origdoc, only_valid_cert)

def correctly_signed_name_id_mapping_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on a NameIDMappingResponse."""
    return self.correctly_signed_message(decoded_xml, 'name_id_mapping_response', must, origdoc, only_valid_cert)

def correctly_signed_artifact_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on an ArtifactRequest."""
    return self.correctly_signed_message(decoded_xml, 'artifact_request', must, origdoc, only_valid_cert)

def correctly_signed_artifact_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on an ArtifactResponse."""
    return self.correctly_signed_message(decoded_xml, 'artifact_response', must, origdoc, only_valid_cert)

def correctly_signed_manage_name_id_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on a ManageNameIDRequest."""
    return self.correctly_signed_message(decoded_xml, 'manage_name_id_request', must, origdoc, only_valid_cert)

def correctly_signed_manage_name_id_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on a ManageNameIDResponse."""
    return self.correctly_signed_message(decoded_xml, 'manage_name_id_response', must, origdoc, only_valid_cert)

def correctly_signed_assertion_id_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on an AssertionIDRequest."""
    return self.correctly_signed_message(decoded_xml, 'assertion_id_request', must, origdoc, only_valid_cert)

def correctly_signed_assertion_id_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
    """Check the signature on an assertion returned for an
    AssertionIDRequest.

    NOTE(review): this parses with msgtype 'assertion', not
    'assertion_id_response' -- presumably because the response carries a
    bare assertion; confirm against callers.
    """
    return self.correctly_signed_message(decoded_xml, 'assertion', must, origdoc, only_valid_cert)
def correctly_signed_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, require_response_signature=False, **kwargs):
    """ Check if a instance is correctly signed, if we have metadata for
    the IdP that sent the info use that, if not use the key that are in
    the message if any.

    :param decoded_xml: The SAML message as a XML string
    :param must: Whether there must be a signature
    :param origdoc: The original document; used for checking the signature
        against the exact bytes that were transmitted
    :param only_valid_cert: presumably restricts checking to currently
        valid certificates -- TODO confirm against _check_signature
    :param require_response_signature: if True, reject responses that
        carry no signature at all
    :return: None if the signature can not be verified otherwise an instance
    :raises TypeError: if the XML cannot be parsed as a samlp Response
    :raises SignatureError: if a required response signature is missing
    """
    response = samlp.any_response_from_string(decoded_xml)
    if not response:
        raise TypeError('Not a Response')
    if response.signature:
        # NOTE(review): passing 'do_not_verify' in kwargs skips signature
        # verification entirely; callers must only use it on input that has
        # been verified through some other channel.
        if 'do_not_verify' in kwargs:
            pass
        else:
            self._check_signature(decoded_xml, response,
                                  class_name(response), origdoc)
    elif require_response_signature:
        raise SignatureError('Signature missing for response')
    return response
def sign_statement_using_xmlsec(self, statement, **kwargs):
    """ Deprecated function. See sign_statement() for parameters. """
    return self.sign_statement(statement, **kwargs)
def sign_statement(self, statement, node_name, key=None, key_file=None, node_id=None, id_attr=''):
    """Sign a SAML statement.

    :param statement: The statement to be signed
    :param node_name: string like 'urn:oasis:names:...:Assertion'
    :param key: The key to be used for the signing, either this or
    :param key_file: The file where the key can be found
    :param node_id: Identifier of the node that should be signed, if any
    :param id_attr: The attribute name for the identifier, normally one of
        'id','Id' or 'ID'
    :return: The signed statement
    """
    if not id_attr:
        # fall back to the id attribute configured on this context
        id_attr = self.id_attr
    if not key_file and key:
        # the backend (xmlsec1) needs the key in a file, so spill an
        # in-memory key to a temporary PEM file
        content = str(key).encode()
        tmp = make_temp(content, suffix=".pem", delete_tmpfiles=self.delete_tmpfiles)
        key_file = tmp.name
    if not key and not key_file:
        # neither given: use the key configured for this security context
        key_file = self.key_file
    return self.crypto.sign_statement(
        statement,
        node_name,
        key_file,
        node_id,
        id_attr)
def sign_assertion_using_xmlsec(self, statement, **kwargs):
    """ Deprecated function. See sign_assertion() for parameters. """
    return self.sign_statement(
        statement, class_name(saml.Assertion()), **kwargs)
def sign_assertion(self, statement, **kwargs):
    """Sign a SAML assertion.

    See sign_statement() for the kwargs.

    :param statement: The statement to be signed
    :return: The signed statement
    """
    # delegate with the qualified Assertion element name as node_name
    return self.sign_statement(
        statement, class_name(saml.Assertion()), **kwargs)
def sign_attribute_query_using_xmlsec(self, statement, **kwargs):
    """ Deprecated function. See sign_attribute_query() for parameters. """
    return self.sign_attribute_query(statement, **kwargs)
def sign_attribute_query(self, statement, **kwargs):
    """Sign a SAML attribute query.

    See sign_statement() for the kwargs.

    :param statement: The statement to be signed
    :return: The signed statement
    """
    # delegate with the qualified AttributeQuery element name as node_name
    return self.sign_statement(
        statement, class_name(samlp.AttributeQuery()), **kwargs)
def multiple_signatures(self, statement, to_sign, key=None, key_file=None, sign_alg=None, digest_alg=None):
    """
    Sign multiple parts of a statement.

    :param statement: The statement that should be sign, this is XML text
    :param to_sign: A list of (items, id, id attribute name) tuples that
        specifies what to sign
    :param key: A key that should be used for doing the signing
    :param key_file: A file that contains the key to be used
    :param sign_alg: Signature algorithm URI to preset, if any
    :param digest_alg: Digest algorithm URI to preset, if any
    :return: A possibly multiple signed statement
    """
    for (item, item_id, id_attr) in to_sign:
        if not item_id:
            if not item.id:
                # BUG FIX: the tuple element used to be unpacked into a
                # local named `sid`, shadowing the module-level sid()
                # helper -- so `sid()` here called the falsy local and
                # crashed. Renamed so the real sid() generates a fresh id.
                item_id = item.id = sid()
            else:
                item_id = item.id
        if not item.signature:
            # preset the signature template for this node
            item.signature = pre_signature_part(
                item_id,
                self.cert_file,
                sign_alg=sign_alg,
                digest_alg=digest_alg)
        statement = self.sign_statement(
            statement,
            class_name(item),
            key=key,
            key_file=key_file,
            node_id=item_id,
            id_attr=id_attr)
    return statement
def pre_signature_part(ident, public_key=None, identifier=None, digest_alg=None, sign_alg=None):
    """
    If an assertion is to be signed the signature part has to be preset
    with which algorithms to be used, this function returns such a
    preset part.

    :param ident: The identifier of the assertion, so you know which assertion
        was signed
    :param public_key: The base64 part of a PEM file
    :param identifier: Suffix used to give the Signature element its own id
    :param digest_alg: Digest algorithm URI; library default when not given
    :param sign_alg: Signature algorithm URI; library default when not given
    :return: A preset signature part
    """
    if not digest_alg:
        digest_alg = ds.DefaultSignature().get_digest_alg()
    if not sign_alg:
        sign_alg = ds.DefaultSignature().get_sign_alg()
    signature_method = ds.SignatureMethod(algorithm=sign_alg)
    canonicalization_method = ds.CanonicalizationMethod(
        algorithm=ds.ALG_EXC_C14N)
    # enveloped-signature + exclusive c14n transforms, the standard pair
    # for signatures embedded inside the signed element
    trans0 = ds.Transform(algorithm=ds.TRANSFORM_ENVELOPED)
    trans1 = ds.Transform(algorithm=ds.ALG_EXC_C14N)
    transforms = ds.Transforms(transform=[trans0, trans1])
    digest_method = ds.DigestMethod(algorithm=digest_alg)
    # the Reference points back at the element carrying `ident`; the
    # DigestValue is left empty for xmlsec to fill in
    reference = ds.Reference(
        uri='#{id}'.format(id=ident),
        digest_value=ds.DigestValue(),
        transforms=transforms,
        digest_method=digest_method)
    signed_info = ds.SignedInfo(
        signature_method=signature_method,
        canonicalization_method=canonicalization_method,
        reference=reference)
    signature = ds.Signature(
        signed_info=signed_info,
        signature_value=ds.SignatureValue())
    if identifier:
        signature.id = 'Signature{n}'.format(n=identifier)
    if public_key:
        # embed the signing certificate so receivers can verify without
        # out-of-band key distribution
        x509_data = ds.X509Data(
            x509_certificate=[ds.X509Certificate(text=public_key)])
        key_info = ds.KeyInfo(x509_data=x509_data)
        signature.key_info = key_info
    return signature
# <?xml version="1.0" encoding="UTF-8"?>
# <EncryptedData Id="ED" Type="http://www.w3.org/2001/04/xmlenc#Element"
# xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#tripledes-cbc"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <EncryptedKey Id="EK" xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#rsa-1_5"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <ds:KeyName>my-rsa-key</ds:KeyName>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# <ReferenceList>
# <DataReference URI="#ED"/>
# </ReferenceList>
# </EncryptedKey>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# </EncryptedData>
def pre_encryption_part(msg_enc=TRIPLE_DES_CBC, key_enc=RSA_1_5, key_name='my-rsa-key'):
    """
    Build the EncryptedData/EncryptedKey template that the xmlsec binary
    later fills in (see the XML example in the comment above).

    NOTE(review): the defaults (3DES-CBC for the payload, RSA PKCS#1 v1.5
    for key transport) are weak by modern standards; confirm whether peers
    support AES / RSA-OAEP before changing them.

    :param msg_enc: Algorithm URI used to encrypt the message body
    :param key_enc: Algorithm URI used to encrypt the session key
    :param key_name: Name of the recipient key referenced from KeyInfo
    :return: An EncryptedData template with empty CipherValue elements
    """
    msg_encryption_method = EncryptionMethod(algorithm=msg_enc)
    key_encryption_method = EncryptionMethod(algorithm=key_enc)
    # the EncryptedKey wraps the (yet to be generated) session key
    encrypted_key = EncryptedKey(
        id='EK',
        encryption_method=key_encryption_method,
        key_info=ds.KeyInfo(
            key_name=ds.KeyName(text=key_name)),
        cipher_data=CipherData(
            cipher_value=CipherValue(text='')))
    key_info = ds.KeyInfo(encrypted_key=encrypted_key)
    encrypted_data = EncryptedData(
        id='ED',
        type='http://www.w3.org/2001/04/xmlenc#Element',
        encryption_method=msg_encryption_method,
        key_info=key_info,
        cipher_data=CipherData(cipher_value=CipherValue(text='')))
    return encrypted_data
def pre_encrypt_assertion(response):
    """
    Move the assertion to within a encrypted_assertion.

    :param response: The response with one assertion
    :return: The response but now with the assertion within an
        encrypted_assertion.
    """
    moved = response.assertion
    response.assertion = None
    response.encrypted_assertion = EncryptedAssertion()
    if moved is None:
        return response
    # a list of assertions and a single assertion are attached through
    # different extension-element APIs
    if isinstance(moved, list):
        response.encrypted_assertion.add_extension_elements(moved)
    else:
        response.encrypted_assertion.add_extension_element(moved)
    return response
def response_factory(sign=False, encrypt=False, sign_alg=None, digest_alg=None,
                     **kwargs):
    """Build a samlp.Response preset with a fresh id, version and timestamp.

    Extra keyword arguments are set as attributes on the response; when
    ``sign`` is True a signature template for kwargs['id'] is attached.
    """
    response = samlp.Response(id=sid(), version=VERSION,
                              issue_instant=instant())
    if sign:
        # note: requires an 'id' keyword argument to reference in the template
        response.signature = pre_signature_part(
            kwargs['id'], sign_alg=sign_alg, digest_alg=digest_alg)
    if encrypt:
        # encryption is prepared elsewhere; nothing to preset here
        pass
    for attr, value in kwargs.items():
        setattr(response, attr, value)
    return response
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--list-sigalgs', dest='listsigalgs',
                        action='store_true',
                        help='List implemented signature algorithms')
    args = parser.parse_args()

    if args.listsigalgs:
        # SIGNER_ALGS maps algorithm URIs to signer objects; only the URIs
        # (keys) matter here, so iterate the mapping directly instead of
        # unpacking unused values from .items().
        print('\n'.join(SIGNER_ALGS))
| ./CrossVul/dataset_final_sorted/CWE-347/py/bad_4590_0 |
crossvul-python_data_good_3997_0 | # -*- coding: utf-8 -*-
#
# fastecdsa documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 15 20:02:52 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
from datetime import datetime
import os
import sys
from unittest import mock
# Make the project itself and this directory importable for autodoc.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))

# Stub out the compiled C extension modules so autodoc can import
# fastecdsa on machines (e.g. documentation builders) where the
# extensions have not been built.
MOCK_MODULES = ['fastecdsa._ecdsa', 'fastecdsa.curvemath']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.mathjax', 'sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'fastecdsa'
# Copyright year tracks the build date automatically.
copyright = '{}, Anton Kueltz'.format(datetime.now().year)
author = 'Anton Kueltz'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1.2'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases expect a string (e.g. 'en') here;
# None relies on the legacy default -- confirm before upgrading Sphinx.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'fastecdsadoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'fastecdsa.tex', 'fastecdsa Documentation',
     'Anton Kueltz', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'fastecdsa', 'fastecdsa Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'fastecdsa', 'fastecdsa Documentation',
     author, 'fastecdsa', 'One line description of project.',
     'Miscellaneous'),
]
| ./CrossVul/dataset_final_sorted/CWE-347/py/good_3997_0 |
crossvul-python_data_bad_1888_2 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-347/py/bad_1888_2 |
crossvul-python_data_good_4590_0 | """ Functions connected to signing and verifying.
Based on the use of xmlsec1 binaries and not the python xmlsec module.
"""
from OpenSSL import crypto
import base64
import hashlib
import itertools
import logging
import os
import six
from time import mktime
from six.moves.urllib import parse
import saml2.cryptography.asymmetric
import saml2.cryptography.pki
from tempfile import NamedTemporaryFile
from subprocess import Popen
from subprocess import PIPE
from saml2 import samlp
from saml2 import SamlBase
from saml2 import SAMLError
from saml2 import extension_elements_to_elements
from saml2 import class_name
from saml2 import saml
from saml2 import ExtensionElement
from saml2 import VERSION
from saml2.cert import OpenSSLWrapper
from saml2.extension import pefim
from saml2.extension.pefim import SPCertEnc
from saml2.saml import EncryptedAssertion
import saml2.xmldsig as ds
from saml2.s_utils import sid
from saml2.s_utils import Unsupported
from saml2.time_util import instant
from saml2.time_util import str_to_time
from saml2.xmldsig import SIG_RSA_SHA1
from saml2.xmldsig import SIG_RSA_SHA224
from saml2.xmldsig import SIG_RSA_SHA256
from saml2.xmldsig import SIG_RSA_SHA384
from saml2.xmldsig import SIG_RSA_SHA512
from saml2.xmlenc import EncryptionMethod
from saml2.xmlenc import EncryptedKey
from saml2.xmlenc import CipherData
from saml2.xmlenc import CipherValue
from saml2.xmlenc import EncryptedData
# Module-level logger.
logger = logging.getLogger(__name__)

# Clark-notation name of the XML-DSig Signature element, built from the
# xmldsig namespace (with the trailing '#') plus the local name.
SIG = '{{{ns}#}}{attribute}'.format(ns=ds.NAMESPACE, attribute='Signature')

# XML Encryption algorithm URIs used as defaults for key transport and
# message encryption respectively.
RSA_1_5 = 'http://www.w3.org/2001/04/xmlenc#rsa-1_5'
TRIPLE_DES_CBC = 'http://www.w3.org/2001/04/xmlenc#tripledes-cbc'
class SigverError(SAMLError):
    """Base class for all signing/verification errors in this module."""
    pass
class CertificateTooOld(SigverError):
    """The certificate is no longer valid (past its not_after date)."""
    pass
class XmlsecError(SigverError):
    """The external xmlsec1 binary reported or caused a failure."""
    pass
class MissingKey(SigverError):
    """No usable key was found for the requested operation."""
    pass
class DecryptError(XmlsecError):
    """xmlsec failed while decrypting a document."""
    pass
class EncryptError(XmlsecError):
    """xmlsec failed while encrypting a document."""
    pass
class SignatureError(XmlsecError):
    """A signature could not be created or is missing where required."""
    pass
class BadSignature(SigverError):
    """The signature is invalid."""
    pass
class CertificateError(SigverError):
    """A certificate file could not be parsed or has an unexpected format."""
    pass
def read_file(*args, **kwargs):
    """Open a file with the given open() arguments and return its whole contents."""
    with open(*args, **kwargs) as fobj:
        return fobj.read()
def rm_xmltag(statement):
    """Strip a leading XML declaration (and the newline that usually
    follows it) from a SAML statement.

    :param statement: XML text, or UTF-8 bytes (decoded before stripping)
    :return: the statement as text without any XML prolog
    """
    # The three prolog spellings the original code special-cased; checking
    # them in one loop removes three copy-pasted branches.
    declarations = (
        "<?xml version='1.0'?>",
        "<?xml version='1.0' encoding='UTF-8'?>",
        '<?xml version="1.0" encoding="UTF-8"?>',
    )
    if isinstance(statement, bytes):
        statement = statement.decode()
    for prefix in declarations:
        if statement.startswith(prefix):
            statement = statement[len(prefix):]
            # guard with startswith instead of indexing so an input that is
            # exactly the declaration no longer raises IndexError
            if statement.startswith('\n'):
                statement = statement[1:]
            break
    return statement
def signed(item):
    """
    Is any part of the document signed ?

    :param item: A Samlbase instance
    :return: True if some part of it is signed
    """
    # signed at this level?
    if SIG in item.c_children.keys() and item.signature:
        return True
    # otherwise recurse into every declared child element
    for prop in item.c_child_order:
        child = getattr(item, prop, None)
        if isinstance(child, list):
            if any(signed(sub) for sub in child):
                return True
        elif child and signed(child):
            return True
    return False
def get_xmlsec_binary(paths=None):
    """
    Tries to find the xmlsec1 binary.

    :param paths: Non-system path paths which should be searched when
        looking for xmlsec1
    :return: full name of the xmlsec1 binary found. If no binaries are
        found then an exception is raised.
    """
    if os.name == 'nt':
        bin_name = ['xmlsec.exe', 'xmlsec1.exe']
    else:
        # posix and anything else default to the plain binary name
        bin_name = ['xmlsec1']

    def _present(filename):
        # lstat rather than exists: a symlink (even dangling) counts
        try:
            os.lstat(filename)
        except OSError:
            return False
        return True

    # caller-supplied directories take precedence over the system PATH
    for bname in bin_name:
        for path in (paths or []):
            candidate = os.path.join(path, bname)
            if _present(candidate):
                return candidate

    for path in os.environ['PATH'].split(os.pathsep):
        for bname in bin_name:
            candidate = os.path.join(path, bname)
            if _present(candidate):
                return candidate

    raise SigverError('Cannot find {binary}'.format(binary=bin_name))
def _get_xmlsec_cryptobackend(path=None, search_paths=None, delete_tmpfiles=True):
    """
    Initialize a CryptoBackendXmlSec1 crypto backend.

    This function is now internal to this module.

    :param path: Explicit path of the xmlsec1 binary; autodetected if None
    :param search_paths: Extra directories searched during autodetection
    :param delete_tmpfiles: Whether temp files are removed when done
    """
    if path is None:
        path = get_xmlsec_binary(paths=search_paths)
    return CryptoBackendXmlSec1(path, delete_tmpfiles=delete_tmpfiles)
# Element/class names used when locating assertions and encrypted keys
# inside SAML documents.
NODE_NAME = 'urn:oasis:names:tc:SAML:2.0:assertion:Assertion'
ENC_NODE_NAME = 'urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAssertion'
ENC_KEY_CLASS = 'EncryptedKey'
def _make_vals(val, klass, seccont, klass_inst=None, prop=None, part=False,
               base64encode=False, elements_to_sign=None):
    """
    Creates a class instance with a specified value, the specified
    class instance may be a value on a property in a defined class instance.

    :param val: The value (a dict, a scalar, or a sequence of either)
    :param klass: The value class
    :param seccont: The security context (passed through to _instance)
    :param klass_inst: The class instance which has a property on which
        what this function returns is a value.
    :param prop: The property which the value should be assigned to.
    :param part: If the value is one of a possible list of values it should be
        handled slightly different compared to if it isn't.
    :param base64encode: Whether text values should be base64 encoded
    :param elements_to_sign: Accumulator list, extended by _instance when a
        'signature' key is seen
    :return: Value class instance (only when part=True; otherwise the value
        is attached to klass_inst.prop and None is returned)
    """
    cinst = None
    if isinstance(val, dict):
        # dict -> build a full sub-instance recursively
        cinst = _instance(klass, val, seccont, base64encode=base64encode,
                         elements_to_sign=elements_to_sign)
    else:
        try:
            cinst = klass().set_text(val)
        except ValueError:
            # set_text rejected it: val is presumably a sequence of values,
            # so recurse once per element with part=True
            if not part:
                cis = [
                    _make_vals(
                        sval,
                        klass,
                        seccont,
                        klass_inst,
                        prop,
                        True,
                        base64encode,
                        elements_to_sign)
                    for sval in val
                ]
                setattr(klass_inst, prop, cis)
            else:
                raise
    if part:
        return cinst
    else:
        if cinst:
            # single value: still stored as a one-element list on the prop
            cis = [cinst]
            setattr(klass_inst, prop, cis)
def _instance(klass, ava, seccont, base64encode=False, elements_to_sign=None):
    """Build a ``klass`` instance from an attribute/value dictionary.

    :param klass: The SamlBase subclass to instantiate
    :param ava: Dict of attribute values, child values, 'text',
        'extension_elements', 'extension_attributes' and/or 'signature'
    :param seccont: The security context, passed through to _make_vals
    :param base64encode: Whether text content should be base64 encoded
    :param elements_to_sign: Accumulator extended with (class name, id)
        when ava requests a signature
    :return: The populated instance
    """
    instance = klass()
    for prop in instance.c_attributes.values():
        if prop in ava:
            # normalise bools/ints to the string form SAML serialisation uses
            if isinstance(ava[prop], bool):
                setattr(instance, prop, str(ava[prop]).encode())
            elif isinstance(ava[prop], int):
                setattr(instance, prop, str(ava[prop]))
            else:
                setattr(instance, prop, ava[prop])
    if 'text' in ava:
        instance.set_text(ava['text'], base64encode)
    for prop, klassdef in instance.c_children.values():
        if prop in ava:
            if isinstance(klassdef, list):
                # means there can be a list of values
                _make_vals(ava[prop], klassdef[0], seccont, instance, prop,
                           base64encode=base64encode,
                           elements_to_sign=elements_to_sign)
            else:
                cis = _make_vals(ava[prop], klassdef, seccont, instance, prop,
                                 True, base64encode, elements_to_sign)
                setattr(instance, prop, cis)
    if 'extension_elements' in ava:
        for item in ava['extension_elements']:
            instance.extension_elements.append(
                ExtensionElement(item['tag']).loadd(item))
    if 'extension_attributes' in ava:
        for key, val in ava['extension_attributes'].items():
            instance.extension_attributes[key] = val
    if 'signature' in ava:
        # record that this element must be signed later on
        elements_to_sign.append((class_name(instance), instance.id))
    return instance
def signed_instance_factory(instance, seccont, elements_to_sign=None):
    """
    :param instance: The instance to be signed or not
    :param seccont: The security context
    :param elements_to_sign: Which parts if any that should be signed
    :return: A class instance if not signed otherwise a string
    """
    if not elements_to_sign:
        return instance

    signed_xml = instance
    if not isinstance(instance, six.string_types):
        signed_xml = instance.to_string()
    for node_name, nodeid in elements_to_sign:
        signed_xml = seccont.sign_statement(
            signed_xml, node_name=node_name, node_id=nodeid)
    return signed_xml
def make_temp(content, suffix="", decode=True, delete_tmpfiles=True):
    """
    Create a temporary file with the given content.

    This is needed by xmlsec in some cases where only strings exist when files
    are expected.

    :param content: The information to be placed in the file
    :param suffix: The temporary file might have to have a specific
        suffix in certain circumstances.
    :param decode: The input content might be base64 coded. If so it
        must, in some cases, be decoded before being placed in the file.
    :param delete_tmpfiles: Whether to keep the tmp files or delete them when
        they are no longer in use
    :return: The open NamedTemporaryFile object; its ``.name`` attribute is
        the filename xmlsec needs. Keep a reference to it for as long as the
        file must exist -- with delete_tmpfiles=True the file disappears as
        soon as the object is closed or garbage collected.
    """
    content_encoded = (
        content.encode("utf-8") if not isinstance(content, six.binary_type) else content
    )
    content_raw = base64.b64decode(content_encoded) if decode else content_encoded
    ntf = NamedTemporaryFile(suffix=suffix, delete=delete_tmpfiles)
    ntf.write(content_raw)
    ntf.seek(0)
    return ntf
def split_len(seq, length):
    """Chop *seq* into consecutive slices of at most *length* items each."""
    return [seq[pos:pos + length] for pos in range(0, len(seq), length)]
# Textual timestamp layout used by OpenSSL/M2Crypto, e.g. 'Apr  7 12:00:00 2020'.
M2_TIME_FORMAT = '%b %d %H:%M:%S %Y'


def to_time(_time):
    """Convert an OpenSSL-style timestamp ending in ' GMT' to seconds
    since the epoch.

    NOTE(review): mktime interprets the parsed tuple as *local* time even
    though the input is GMT -- confirm this offset is intended by callers.

    :param _time: timestamp string, must end with ' GMT'
    """
    assert _time.endswith(' GMT')
    _time = _time[:-4]
    return mktime(str_to_time(_time, M2_TIME_FORMAT))
def active_cert(key):
    """
    Verifies that a key is active that is present time is after not_before
    and before not_after.

    :param key: The Key as a base64 PEM body (without armor lines)
    :return: True if the key is active else False
    """
    try:
        cert_str = pem_format(key)
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
        # Explicit checks instead of `assert`: assertions are removed when
        # Python runs with -O, which silently made every certificate look
        # active.
        if cert.has_expired() != 0:
            return False
        if OpenSSLWrapper().certificate_not_valid_yet(cert):
            return False
        return True
    except AttributeError:
        # e.g. `key` was not a string-like object
        return False
def cert_from_key_info(key_info, ignore_age=False):
    """ Get all X509 certs from a KeyInfo instance. Care is taken to make sure
    that the certs are continues sequences of bytes.

    All certificates appearing in an X509Data element MUST relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.

    :param key_info: The KeyInfo instance
    :param ignore_age: Include certificates even if active_cert() rejects them
    :return: A possibly empty list of certs
    """
    res = []
    for x509_data in key_info.x509_data:
        x509_certificate = x509_data.x509_certificate
        cert = x509_certificate.text.strip()
        # strip all embedded whitespace, then re-wrap to 64-char PEM lines
        cert = '\n'.join(split_len(''.join([s.strip() for s in
                                            cert.split()]), 64))
        if ignore_age or active_cert(cert):
            res.append(cert)
        else:
            logger.info('Inactive cert')
    return res
def cert_from_key_info_dict(key_info, ignore_age=False):
    """ Get all X509 certs from a KeyInfo dictionary. Care is taken to make sure
    that the certs are continues sequences of bytes.

    All certificates appearing in an X509Data element MUST relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.

    :param key_info: The KeyInfo dictionary
    :param ignore_age: Include certificates even if active_cert() rejects them
    :return: A possibly empty list of certs in their text representation
    """
    res = []
    if 'x509_data' not in key_info:
        return res
    for x509_data in key_info['x509_data']:
        x509_certificate = x509_data['x509_certificate']
        cert = x509_certificate['text'].strip()
        # strip all embedded whitespace, then re-wrap to 64-char PEM lines
        cert = '\n'.join(split_len(''.join(
            [s.strip() for s in cert.split()]), 64))
        if ignore_age or active_cert(cert):
            res.append(cert)
        else:
            logger.info('Inactive cert')
    return res
def cert_from_instance(instance):
    """ Find certificates that are part of an instance

    :param instance: An instance
    :return: possible empty list of certificates
    """
    signature = instance.signature
    if signature and signature.key_info:
        # age is deliberately ignored here; callers filter if they care
        return cert_from_key_info(signature.key_info, ignore_age=True)
    return []
def extract_rsa_key_from_x509_cert(pem):
    """Return the public key object embedded in a PEM encoded X509 cert."""
    cert = saml2.cryptography.pki.load_pem_x509_certificate(pem)
    return cert.public_key()
def pem_format(key):
    """Wrap a bare base64 certificate body in PEM armor lines, as ASCII bytes."""
    armored = '-----BEGIN CERTIFICATE-----\n{key}\n-----END CERTIFICATE-----'.format(key=key)
    return armored.encode('ascii')
def import_rsa_key_from_file(filename):
    """Load an unencrypted PEM private key from *filename* and return it."""
    data = read_file(filename, 'rb')
    # second argument is the password; None means the key is unencrypted
    key = saml2.cryptography.asymmetric.load_pem_private_key(data, None)
    return key
def parse_xmlsec_output(output):
    """ Parse the output from xmlsec to try to find out if the
    command was successfull or not.

    :param output: The output from Popen
    :return: A boolean; True if the command was a success otherwise False
    """
    for line in output.splitlines():
        if line == 'OK':
            return True
        if line == 'FAIL':
            # explicit failure marker: stop scanning and report
            break
    raise XmlsecError(output)
def sha1_digest(msg):
    """Return the raw SHA-1 digest of *msg* (bytes)."""
    hasher = hashlib.sha1()
    hasher.update(msg)
    return hasher.digest()
class Signer(object):
    """Abstract base class for signing algorithms.

    Subclasses hold a default ``key`` and may accept a per-call override.
    """

    def __init__(self, key):
        # default key used when sign()/verify() get no explicit key
        self.key = key

    def sign(self, msg, key):
        """Sign ``msg`` with ``key`` and return the signature."""
        raise NotImplementedError

    def verify(self, msg, sig, key):
        """Return True if ``sig`` is a valid signature for ``msg``."""
        raise NotImplementedError
class RSASigner(Signer):
    """RSA signer/verifier parameterised by a hash algorithm instance."""

    def __init__(self, digest, key=None):
        Signer.__init__(self, key)
        # hash algorithm object handed through to the crypto helpers
        self.digest = digest

    def sign(self, msg, key=None):
        """Sign ``msg`` with ``key`` (or the instance default key)."""
        return saml2.cryptography.asymmetric.key_sign(
            key or self.key, msg, self.digest)

    def verify(self, msg, sig, key=None):
        """Verify ``sig`` over ``msg`` with ``key`` (or the default key)."""
        return saml2.cryptography.asymmetric.key_verify(
            key or self.key, sig, msg, self.digest)
# Maps XML-DSig signature algorithm URIs to ready-made RSA signer objects.
SIGNER_ALGS = {
    SIG_RSA_SHA1: RSASigner(saml2.cryptography.asymmetric.hashes.SHA1()),
    SIG_RSA_SHA224: RSASigner(saml2.cryptography.asymmetric.hashes.SHA224()),
    SIG_RSA_SHA256: RSASigner(saml2.cryptography.asymmetric.hashes.SHA256()),
    SIG_RSA_SHA384: RSASigner(saml2.cryptography.asymmetric.hashes.SHA384()),
    SIG_RSA_SHA512: RSASigner(saml2.cryptography.asymmetric.hashes.SHA512()),
}
# Parameter concatenation order mandated for signing/verifying the
# HTTP-Redirect binding: requests and responses each have a fixed order.
REQ_ORDER = [
    'SAMLRequest',
    'RelayState',
    'SigAlg',
]

RESP_ORDER = [
    'SAMLResponse',
    'RelayState',
    'SigAlg',
]
class RSACrypto(object):
    """Holds a default RSA key and hands out signer objects per algorithm."""

    def __init__(self, key):
        # default signing/verification key
        self.key = key

    def get_signer(self, sigalg, sigkey=None):
        """Return a signer for the algorithm URI *sigalg*, or None if
        the algorithm is not implemented. The signer is primed with
        *sigkey* when given, otherwise with the instance key."""
        signer = SIGNER_ALGS.get(sigalg)
        if signer is None:
            return None
        signer.key = sigkey if sigkey else self.key
        return signer
def verify_redirect_signature(saml_msg, crypto, cert=None, sigkey=None):
    """
    :param saml_msg: A dictionary with strings as values, *NOT* lists as
        produced by parse_qs.
    :param crypto: An RSACrypto-like object providing get_signer()
    :param cert: A certificate to use when verifying the signature
    :param sigkey: An explicit key used instead of the certificate's key
    :return: True, if signature verified
    :raises Unsupported: for unknown algorithms or unsignable messages
    """
    try:
        signer = crypto.get_signer(saml_msg['SigAlg'], sigkey)
    except KeyError:
        raise Unsupported('Signature algorithm: {alg}'.format(
            alg=saml_msg['SigAlg']))
    else:
        if saml_msg['SigAlg'] in SIGNER_ALGS:
            # the signed string must be rebuilt in the exact parameter
            # order mandated by the redirect binding
            if 'SAMLRequest' in saml_msg:
                _order = REQ_ORDER
            elif 'SAMLResponse' in saml_msg:
                _order = RESP_ORDER
            else:
                raise Unsupported(
                    'Verifying signature on something that should not be '
                    'signed')
            _args = saml_msg.copy()
            del _args['Signature']  # everything but the signature
            string = '&'.join(
                [parse.urlencode({k: _args[k]}) for k in _order if k in
                 _args]).encode('ascii')
            if cert:
                _key = extract_rsa_key_from_x509_cert(pem_format(cert))
            else:
                _key = sigkey
            _sign = base64.b64decode(saml_msg['Signature'])
            return bool(signer.verify(string, _sign, _key))
def make_str(txt):
    """Return *txt* as a text string, decoding bytes-like input if needed."""
    return txt if isinstance(txt, six.string_types) else txt.decode()
def read_cert_from_file(cert_file, cert_type):
    """ Reads a certificate from a file. The assumption is that there is
    only one certificate in the file

    :param cert_file: The name of the file
    :param cert_type: The certificate type: 'pem' or one of 'der'/'cer'/'crt'
    :return: A base64 encoded certificate as a string or the empty string
    :raises CertificateError: if a PEM file lacks the expected armor lines
    """
    if not cert_file:
        return ''

    if cert_type == 'pem':
        _a = read_file(cert_file, 'rb').decode()
        _b = _a.replace('\r\n', '\n')
        lines = _b.split('\n')

        # cut everything up to and including the BEGIN armor line;
        # the for/else raises when no known armor is present
        for pattern in (
                '-----BEGIN CERTIFICATE-----',
                '-----BEGIN PUBLIC KEY-----'):
            if pattern in lines:
                lines = lines[lines.index(pattern) + 1:]
                break
        else:
            raise CertificateError('Strange beginning of PEM file')

        # likewise cut everything from the END armor line on
        for pattern in (
                '-----END CERTIFICATE-----',
                '-----END PUBLIC KEY-----'):
            if pattern in lines:
                lines = lines[:lines.index(pattern)]
                break
        else:
            raise CertificateError('Strange end of PEM file')
        return make_str(''.join(lines).encode())

    if cert_type in ['der', 'cer', 'crt']:
        # binary formats are base64 encoded wholesale
        data = read_file(cert_file, 'rb')
        _cert = base64.b64encode(data)
        return make_str(_cert)
class CryptoBackend(object):
    """Abstract interface implemented by concrete XML crypto backends."""

    def version(self):
        """Return the version string of the underlying tool."""
        raise NotImplementedError()

    def encrypt(self, text, recv_key, template, key_type):
        """Encrypt ``text`` for ``recv_key`` using an xmlenc template."""
        raise NotImplementedError()

    def encrypt_assertion(self, statement, enc_key, template, key_type, node_xpath):
        """Encrypt the assertion inside ``statement``."""
        raise NotImplementedError()

    def decrypt(self, enctext, key_file, id_attr):
        """Decrypt an encrypted XML document with the given private key."""
        raise NotImplementedError()

    def sign_statement(self, statement, node_name, key_file, node_id, id_attr):
        """Sign the named node within ``statement``."""
        raise NotImplementedError()

    def validate_signature(self, enctext, cert_file, cert_type, node_name, node_id, id_attr):
        """Verify the signature on the named node."""
        raise NotImplementedError()
# XPath that selects the Assertion nested in Response/EncryptedAssertion,
# i.e. "/*[local-name()='Response']/*[local-name()='EncryptedAssertion']
# /*[local-name()='Assertion']" -- local-name() makes it namespace-agnostic.
ASSERT_XPATH = ''.join([
    '/*[local-name()=\'{name}\']'.format(name=n)
    for n in ['Response', 'EncryptedAssertion', 'Assertion']
])
class CryptoBackendXmlSec1(CryptoBackend):
"""
CryptoBackend implementation using external binary 1 to sign
and verify XML documents.
"""
__DEBUG = 0
def __init__(self, xmlsec_binary, delete_tmpfiles=True, **kwargs):
    """
    :param xmlsec_binary: Full path of the xmlsec1 executable
    :param delete_tmpfiles: Whether temp files are removed after use
    :param kwargs: May contain 'rsa_key' to enable non-XML (redirect
        binding) signatures via RSACrypto
    """
    CryptoBackend.__init__(self, **kwargs)
    assert (isinstance(xmlsec_binary, six.string_types))
    self.xmlsec = xmlsec_binary
    self.delete_tmpfiles = delete_tmpfiles
    try:
        # optional: a raw RSA key enables redirect-binding signatures
        self.non_xml_crypto = RSACrypto(kwargs['rsa_key'])
    except KeyError:
        pass
def version(self):
    """Return the xmlsec1 version string, or '' if it cannot be parsed."""
    com_list = [self.xmlsec, '--version']
    pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
    content, _ = pof.communicate()
    content = content.decode('ascii')
    try:
        # output looks like 'xmlsec1 <version>'; take the second token
        return content.split(' ')[1]
    except IndexError:
        return ''
def encrypt(self, text, recv_key, template, session_key_type, xpath=''):
    """
    :param text: The text to be compiled
    :param recv_key: Filename of a file where the key resides
    :param template: Filename of a file with the pre-encryption part
    :param session_key_type: Type and size of a new session key
        'des-192' generates a new 192 bits DES key for DES3 encryption
    :param xpath: What should be encrypted
    :return: The encrypted XML as produced by xmlsec
    :raises EncryptError: if the xmlsec invocation fails
    """
    logger.debug('Encryption input len: %d', len(text))
    tmp = make_temp(text, decode=False, delete_tmpfiles=self.delete_tmpfiles)
    com_list = [
        self.xmlsec,
        '--encrypt',
        '--pubkey-cert-pem', recv_key,
        '--session-key', session_key_type,
        '--xml-data', tmp.name,
    ]
    if xpath:
        com_list.extend(['--node-xpath', xpath])

    try:
        (_stdout, _stderr, output) = self._run_xmlsec(com_list, [template])
    except XmlsecError as e:
        # surface the failed command line, chained to the root cause
        six.raise_from(EncryptError(com_list), e)

    return output
def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None, node_id=None):
    """
    Will encrypt an assertion

    :param statement: A XML document that contains the assertion to encrypt
    :param enc_key: File name of a file containing the encryption key
    :param template: A template for the encryption part to be added.
    :param key_type: The type of session key to use.
    :param node_xpath: XPath of the node to encrypt; defaults to the
        Response/EncryptedAssertion/Assertion path
    :param node_id: Optional id of the specific node to encrypt
    :return: The encrypted text
    :raises EncryptError: if the xmlsec invocation fails
    """
    if six.PY2:
        _str = unicode
    else:
        _str = str

    if isinstance(statement, SamlBase):
        # wrap the assertion in an EncryptedAssertion element first
        statement = pre_encrypt_assertion(statement)

    tmp = make_temp(_str(statement),
                    decode=False,
                    delete_tmpfiles=self.delete_tmpfiles)
    tmp2 = make_temp(_str(template),
                     decode=False,
                     delete_tmpfiles=self.delete_tmpfiles)

    if not node_xpath:
        node_xpath = ASSERT_XPATH

    com_list = [
        self.xmlsec,
        '--encrypt',
        '--pubkey-cert-pem', enc_key,
        '--session-key', key_type,
        '--xml-data', tmp.name,
        '--node-xpath', node_xpath,
    ]
    if node_id:
        com_list.extend(['--node-id', node_id])

    try:
        (_stdout, _stderr, output) = self._run_xmlsec(com_list, [tmp2.name])
    except XmlsecError as e:
        six.raise_from(EncryptError(com_list), e)

    return output.decode('utf-8')
def decrypt(self, enctext, key_file, id_attr):
    """Decrypt an encrypted XML document with xmlsec1.

    :param enctext: XML document containing an encrypted part
    :param key_file: The private key to use for the decryption
    :param id_attr: The attribute name used to identify nodes
    :return: The decrypted document
    """
    logger.debug('Decrypt input len: %d', len(enctext))
    data_file = make_temp(enctext, decode=False, delete_tmpfiles=self.delete_tmpfiles)

    args = [
        self.xmlsec,
        '--decrypt',
        '--privkey-pem', key_file,
        '--id-attr:{id_attr}'.format(id_attr=id_attr), ENC_KEY_CLASS,
    ]

    try:
        _stdout, _stderr, output = self._run_xmlsec(args, [data_file.name])
    except XmlsecError as e:
        six.raise_from(DecryptError(args), e)

    return output.decode('utf-8')
def sign_statement(self, statement, node_name, key_file, node_id, id_attr):
    """
    Sign an XML statement.

    :param statement: The statement to be signed
    :param node_name: string like 'urn:oasis:names:...:Assertion'
    :param key_file: The file where the private key can be found
    :param node_id: The identifier of the node to sign, if any
    :param id_attr: The attribute name for the identifier, normally one of
        'id','Id' or 'ID'
    :return: The signed statement
    :raises SignatureError: if xmlsec fails or produces no output
    """
    if isinstance(statement, SamlBase):
        statement = str(statement)

    tmp = make_temp(statement,
                    suffix=".xml",
                    decode=False,
                    delete_tmpfiles=self.delete_tmpfiles)

    com_list = [
        self.xmlsec,
        '--sign',
        '--privkey-pem', key_file,
        '--id-attr:{id_attr_name}'.format(id_attr_name=id_attr),
        node_name,
    ]
    if node_id:
        com_list.extend(['--node-id', node_id])

    try:
        (stdout, stderr, output) = self._run_xmlsec(com_list, [tmp.name])
    except XmlsecError as e:
        # Chain the underlying xmlsec failure as the cause, consistent
        # with the sibling encrypt/decrypt/validate methods (previously
        # the original exception was silently dropped).
        six.raise_from(SignatureError(com_list), e)

    # The signed document normally lands in the --output file;
    # this does not work if --store-signatures is used.
    if output:
        return output.decode("utf-8")
    if stdout:
        return stdout.decode("utf-8")
    raise SignatureError(stderr)
def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id, id_attr):
    """Validate a signature on an XML document with xmlsec1.

    :param signedtext: The XML document as a string
    :param cert_file: The public key that was used to sign the document
    :param cert_type: The file type of the certificate
    :param node_name: The name of the class that is signed
    :param node_id: The identifier of the node
    :param id_attr: The attribute name for the identifier, normally one of
        'id','Id' or 'ID'
    :return: Boolean True if the signature was correct otherwise False.
    """
    if not isinstance(signedtext, six.binary_type):
        signedtext = signedtext.encode('utf-8')

    doc_file = make_temp(signedtext,
                         suffix=".xml",
                         decode=False,
                         delete_tmpfiles=self.delete_tmpfiles)

    args = [
        self.xmlsec,
        '--verify',
        # Only empty and same-document reference URIs are accepted,
        # per the enveloped-signature profile.
        '--enabled-reference-uris', 'empty,same-doc',
        '--pubkey-cert-{type}'.format(type=cert_type), cert_file,
        '--id-attr:{id_attr_name}'.format(id_attr_name=id_attr),
        node_name,
    ]
    if node_id:
        args += ['--node-id', node_id]

    try:
        _stdout, stderr, _output = self._run_xmlsec(args, [doc_file.name])
    except XmlsecError as e:
        six.raise_from(SignatureError(args), e)

    return parse_xmlsec_output(stderr)
def _run_xmlsec(self, com_list, extra_args):
    """Invoke the xmlsec1 binary and collect its results.

    :param com_list: Key-value parameter list for xmlsec
    :param extra_args: Positional parameters to be appended after all
        key-value parameters
    :result: Whatever xmlsec wrote to an --output temporary file
    """
    with NamedTemporaryFile(suffix='.xml') as result_file:
        command = com_list
        command.extend(['--output', result_file.name])
        command += extra_args

        logger.debug('xmlsec command: %s', ' '.join(command))

        proc = Popen(command, stderr=PIPE, stdout=PIPE)
        raw_out, raw_err = proc.communicate()
        p_out = raw_out.decode()
        p_err = raw_err.decode()

        if proc.returncode != 0:
            errmsg = "returncode={code}\nerror={err}\noutput={out}".format(
                code=proc.returncode, err=p_err, out=p_out
            )
            logger.error(errmsg)
            raise XmlsecError(errmsg)

        result_file.seek(0)
        return p_out, p_err, result_file.read()
class CryptoBackendXMLSecurity(CryptoBackend):
    """
    CryptoBackend implementation using pyXMLSecurity to sign and verify
    XML documents.

    Encrypt and decrypt is currently unsupported by pyXMLSecurity.

    pyXMLSecurity uses lxml (libxml2) to parse XML data, but otherwise
    try to get by with native Python code. It does native Python RSA
    signatures, or alternatively PyKCS11 to offload cryptographic work
    to an external PKCS#11 module.
    """

    def __init__(self):
        CryptoBackend.__init__(self)

    def version(self):
        # XXX if XMLSecurity.__init__ included a __version__, that would be
        # better than static 0.0 here.
        return 'XMLSecurity 0.0'

    def sign_statement(self, statement, node_name, key_file, node_id, id_attr):
        """Sign an XML statement.

        The parameters actually used in this CryptoBackend
        implementation are:

        :param statement: XML as string
        :param node_name: Name of the node to sign
        :param key_file: xmlsec key_spec string(), filename,
            'pkcs11://' URI or PEM data
        :returns: Signed XML as string
        """
        import lxml.etree
        import xmlsec

        parsed = xmlsec.parse_xml(statement)
        signed = xmlsec.sign(parsed, key_file)
        serialized = lxml.etree.tostring(signed, xml_declaration=False, encoding="UTF-8")
        if not isinstance(serialized, six.string_types):
            serialized = serialized.decode("utf-8")
        return serialized

    def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id, id_attr):
        """Validate signature on XML document.

        The parameters actually used in this CryptoBackend
        implementation are:

        :param signedtext: The signed XML data as string
        :param cert_file: xmlsec key_spec string(), filename,
            'pkcs11://' URI or PEM data
        :param cert_type: string, must be 'pem' for now
        :returns: True on successful validation, False otherwise
        """
        if cert_type != 'pem':
            raise Unsupported('Only PEM certs supported here')

        import xmlsec

        parsed = xmlsec.parse_xml(signedtext)
        try:
            return xmlsec.verify(parsed, cert_file)
        except xmlsec.XMLSigException:
            return False
def security_context(conf):
    """Create a security context based on the configuration.

    :param conf: The configuration, this is a Config instance
    :return: A SecurityContext instance, or None when no config is given
    """
    if not conf:
        return None

    metadata = getattr(conf, 'metadata', None)
    id_attr = getattr(conf, 'id_attr_name', None)

    sec_backend = None

    if conf.crypto_backend == 'xmlsec1':
        xmlsec_binary = conf.xmlsec_binary
        if not xmlsec_binary:
            _path = getattr(conf, 'xmlsec_path', [])
            xmlsec_binary = get_xmlsec_binary(_path)

        # verify that xmlsec is where it's supposed to be
        if not os.path.exists(xmlsec_binary):
            raise SigverError(
                'xmlsec binary not found: {binary}'.format(binary=xmlsec_binary))

        crypto = _get_xmlsec_cryptobackend(
            xmlsec_binary, delete_tmpfiles=conf.delete_tmpfiles)

        _file_name = conf.getattr('key_file', '')
        if _file_name:
            try:
                rsa_key = import_rsa_key_from_file(_file_name)
            except Exception as err:
                logger.error('Cannot import key from {file}: {err_msg}'.format(
                    file=_file_name, err_msg=err))
                raise
            else:
                sec_backend = RSACrypto(rsa_key)
    elif conf.crypto_backend == 'XMLSecurity':
        # new and somewhat untested pyXMLSecurity crypto backend.
        crypto = CryptoBackendXMLSecurity()
    else:
        raise SigverError(
            'Unknown crypto_backend {backend}'.format(backend=conf.crypto_backend))

    enc_key_files = []
    if conf.encryption_keypairs is not None:
        enc_key_files = [
            keypair['key_file']
            for keypair in conf.encryption_keypairs
            if 'key_file' in keypair
        ]

    return SecurityContext(
        crypto,
        conf.key_file,
        cert_file=conf.cert_file,
        metadata=metadata,
        only_use_keys_in_metadata=conf.only_use_keys_in_metadata,
        cert_handler_extra_class=conf.cert_handler_extra_class,
        generate_cert_info=conf.generate_cert_info,
        tmp_cert_file=conf.tmp_cert_file,
        tmp_key_file=conf.tmp_key_file,
        validate_certificate=conf.validate_certificate,
        enc_key_files=enc_key_files,
        encryption_keypairs=conf.encryption_keypairs,
        sec_backend=sec_backend,
        id_attr=id_attr,
        delete_tmpfiles=conf.delete_tmpfiles)
def encrypt_cert_from_item(item):
    """Extract an encryption certificate from a request's extensions.

    Looks for a SPCertEnc extension element carrying X509 data and, when
    found, returns the certificate text wrapped in PEM markers.

    :param item: A parsed SAML message (e.g. an AuthnRequest)
    :return: The certificate as a PEM string, or None if no certificate
        could be extracted
    """
    _encrypt_cert = None
    try:
        try:
            _elem = extension_elements_to_elements(
                item.extensions.extension_elements, [pefim, ds])
        except Exception:
            # Fall back to the raw extension element children.
            # (Was a bare `except:`; narrowed so SystemExit and
            # KeyboardInterrupt are not swallowed.)
            _elem = extension_elements_to_elements(
                item.extension_elements[0].children,
                [pefim, ds])

        for _tmp_elem in _elem:
            if isinstance(_tmp_elem, SPCertEnc):
                for _tmp_key_info in _tmp_elem.key_info:
                    if _tmp_key_info.x509_data is not None and len(
                            _tmp_key_info.x509_data) > 0:
                        _encrypt_cert = _tmp_key_info.x509_data[
                            0].x509_certificate.text
                        break
    except Exception as _exception:
        # Best effort: a missing or malformed extension simply means there
        # is no certificate to return, but leave a trace for debugging
        # instead of swallowing the error completely.
        logger.debug('Could not extract encrypt cert from item: %s', _exception)

    if _encrypt_cert is not None:
        # Normalize to a full PEM block if the markers are missing.
        if _encrypt_cert.find('-----BEGIN CERTIFICATE-----\n') == -1:
            _encrypt_cert = '-----BEGIN CERTIFICATE-----\n' + _encrypt_cert
        if _encrypt_cert.find('\n-----END CERTIFICATE-----') == -1:
            _encrypt_cert = _encrypt_cert + '\n-----END CERTIFICATE-----'
    return _encrypt_cert
class CertHandlerExtra(object):
    """Extension hook for custom certificate generation and validation.

    Subclasses report via the use_*_func predicates whether their
    generate_cert/validate_cert implementations should be used by the
    CertHandler; every method here must be overridden.
    """

    def __init__(self):
        pass

    def use_generate_cert_func(self):
        raise Exception('use_generate_cert_func function must be implemented')

    def generate_cert(self, generate_cert_info, root_cert_string,
                      root_key_string):
        # Implementations are expected to return (cert_string, key_string).
        raise Exception('generate_cert function must be implemented')

    def use_validate_cert_func(self):
        raise Exception('use_validate_cert_func function must be implemented')

    def validate_cert(self, cert_str, root_cert_string, root_key_string):
        # Implementations are expected to return True/False.
        raise Exception('validate_cert function must be implemented')
class CertHandler(object):
    """Manages the certificates used by a SecurityContext.

    Supports a single static certificate as the base case and,
    when fully configured, generation of a fresh certificate
    (optionally delegated to a CertHandlerExtra subclass) on each
    update_cert() call.
    """

    def __init__(
            self,
            security_context,
            cert_file=None, cert_type='pem',
            key_file=None, key_type='pem',
            generate_cert_info=None,
            cert_handler_extra_class=None,
            tmp_cert_file=None,
            tmp_key_file=None,
            verify_cert=False):
        """
        Initiates the class for handling certificates. Enables the certificates
        to either be a single certificate as base functionality or makes it
        possible to generate a new certificate for each call to the function.

        :param security_context: The SecurityContext this handler belongs to
        :param cert_file: Path to the base certificate
        :param cert_type: File type of the certificate; only 'pem' enables
            verification/generation features
        :param key_file: Path to the base private key
        :param key_type: File type of the key; only 'pem' is supported here
        :param generate_cert_info: Info used to generate per-call certs
        :param cert_handler_extra_class: Optional CertHandlerExtra instance
        :param tmp_cert_file: Where generated certificates are written
        :param tmp_key_file: Where generated keys are written
        :param verify_cert: Whether client certificates should be verified
        """
        self._verify_cert = False
        self._generate_cert = False
        # This cert do not have to be valid, it is just the last cert to be
        # validated.
        self._last_cert_verified = None
        self._last_validated_cert = None
        # Everything below only applies to PEM material.
        if cert_type == 'pem' and key_type == 'pem':
            self._verify_cert = verify_cert is True
            self._security_context = security_context
            self._osw = OpenSSLWrapper()
            if key_file and os.path.isfile(key_file):
                self._key_str = self._osw.read_str_from_file(key_file, key_type)
            else:
                self._key_str = ''
            if cert_file and os.path.isfile(cert_file):
                self._cert_str = self._osw.read_str_from_file(cert_file,
                                                              cert_type)
            else:
                self._cert_str = ''
            # The "tmp" strings hold the currently active material; they
            # start as copies of the base cert/key and are replaced when
            # a new certificate is generated.
            self._tmp_cert_str = self._cert_str
            self._tmp_key_str = self._key_str
            self._tmp_cert_file = tmp_cert_file
            self._tmp_key_file = tmp_key_file

            self._cert_info = None
            self._generate_cert_func_active = False
            # Per-call generation requires base material AND both output
            # file locations to be configured.
            if generate_cert_info is not None \
                    and len(self._cert_str) > 0 \
                    and len(self._key_str) > 0 \
                    and tmp_key_file is not None \
                    and tmp_cert_file is not None:
                self._generate_cert = True
                self._cert_info = generate_cert_info
                self._cert_handler_extra_class = cert_handler_extra_class

    def verify_cert(self, cert_file):
        # Verify the given certificate against the base certificate.
        # Returns True when verification is disabled.
        if self._verify_cert:
            if cert_file and os.path.isfile(cert_file):
                cert_str = self._osw.read_str_from_file(cert_file, 'pem')
            else:
                return False
            self._last_validated_cert = cert_str
            if self._cert_handler_extra_class is not None and \
                    self._cert_handler_extra_class.use_validate_cert_func():
                self._cert_handler_extra_class.validate_cert(
                    cert_str, self._cert_str, self._key_str)
            else:
                valid, mess = self._osw.verify(self._cert_str, cert_str)
                logger.info('CertHandler.verify_cert: %s', mess)
                return valid
        return True

    def generate_cert(self):
        # Whether per-call certificate generation is enabled.
        return self._generate_cert

    def update_cert(self, active=False, client_crt=None):
        # Replace the active certificate/key, either with a client-supplied
        # certificate or with newly generated material, then point the
        # owning security context at the new files.
        if (self._generate_cert and active) or client_crt is not None:
            if client_crt is not None:
                self._tmp_cert_str = client_crt
                # No private key for signing
                self._tmp_key_str = ''
            elif self._cert_handler_extra_class is not None and \
                    self._cert_handler_extra_class.use_generate_cert_func():
                (self._tmp_cert_str, self._tmp_key_str) = \
                    self._cert_handler_extra_class.generate_cert(
                        self._cert_info, self._cert_str, self._key_str)
            else:
                # Generate a CSR, sign it with the base cert/key and verify
                # the result against the base certificate.
                self._tmp_cert_str, self._tmp_key_str = self._osw \
                    .create_certificate(self._cert_info, request=True)
                self._tmp_cert_str = self._osw.create_cert_signed_certificate(
                    self._cert_str, self._key_str, self._tmp_cert_str)
                valid, mess = self._osw.verify(self._cert_str,
                                               self._tmp_cert_str)
            self._osw.write_str_to_file(self._tmp_cert_file, self._tmp_cert_str)
            self._osw.write_str_to_file(self._tmp_key_file, self._tmp_key_str)
            self._security_context.key_file = self._tmp_key_file
            self._security_context.cert_file = self._tmp_cert_file
            self._security_context.key_type = 'pem'
            self._security_context.cert_type = 'pem'
            self._security_context.my_cert = read_cert_from_file(
                self._security_context.cert_file,
                self._security_context.cert_type)
# How to get a rsa pub key fingerprint from a certificate
# openssl x509 -inform pem -noout -in server.crt -pubkey > publickey.pem
# openssl rsa -inform pem -noout -in publickey.pem -pubin -modulus
class SecurityContext(object):
    """Holds keys, certificates and metadata, and delegates signing,
    verification, encryption and decryption of SAML documents to a
    CryptoBackend implementation.
    """

    # Attribute name used to identify signed nodes when none is configured.
    DEFAULT_ID_ATTR_NAME = 'ID'

    my_cert = None

    def __init__(
            self,
            crypto,
            key_file='', key_type='pem',
            cert_file='', cert_type='pem',
            metadata=None,
            template='',
            encrypt_key_type='des-192',
            only_use_keys_in_metadata=False,
            cert_handler_extra_class=None,
            generate_cert_info=None,
            tmp_cert_file=None, tmp_key_file=None,
            validate_certificate=None,
            enc_key_files=None, enc_key_type='pem',
            encryption_keypairs=None,
            enc_cert_type='pem',
            sec_backend=None,
            id_attr='',
            delete_tmpfiles=True):
        """
        :param crypto: A CryptoBackend instance performing the actual work
        :param key_file: Your private key for signing
        :param cert_file: Your public key (certificate) for signing
        :param metadata: Metadata instance used to look up signing certs
        :param template: Encryption template file; a bundled default is
            used when empty
        :param encrypt_key_type: Default session key type for encryption
        :param only_use_keys_in_metadata: If True, never fall back to certs
            embedded in the document itself
        :param enc_key_files: Your private keys for decryption
        :param encryption_keypairs: Public/private keypairs for encryption
        :param sec_backend: Optional RSACrypto backend
        :param id_attr: Attribute name identifying signed nodes
        :param delete_tmpfiles: Whether temporary files should be removed
        """
        self.id_attr = id_attr or SecurityContext.DEFAULT_ID_ATTR_NAME

        self.crypto = crypto
        assert (isinstance(self.crypto, CryptoBackend))

        if sec_backend:
            assert (isinstance(sec_backend, RSACrypto))
        self.sec_backend = sec_backend

        # Your private key for signing
        self.key_file = key_file
        self.key_type = key_type

        # Your public key for signing
        self.cert_file = cert_file
        self.cert_type = cert_type

        # Your private key for encryption
        self.enc_key_files = enc_key_files
        self.enc_key_type = enc_key_type

        # Your public key for encryption
        self.encryption_keypairs = encryption_keypairs
        self.enc_cert_type = enc_cert_type

        self.my_cert = read_cert_from_file(cert_file, cert_type)

        self.cert_handler = CertHandler(
            self,
            cert_file, cert_type,
            key_file, key_type,
            generate_cert_info,
            cert_handler_extra_class,
            tmp_cert_file,
            tmp_key_file,
            validate_certificate)

        self.cert_handler.update_cert(True)

        self.metadata = metadata
        self.only_use_keys_in_metadata = only_use_keys_in_metadata

        if not template:
            # Fall back to the encryption template bundled with the package.
            this_dir, this_filename = os.path.split(__file__)
            self.template = os.path.join(this_dir, 'xml_template', 'template.xml')
        else:
            self.template = template

        self.encrypt_key_type = encrypt_key_type

        self.delete_tmpfiles = delete_tmpfiles

    def correctly_signed(self, xml, must=False):
        """Shorthand for correctly_signed_response()."""
        logger.debug('verify correct signature')
        return self.correctly_signed_response(xml, must)

    def encrypt(self, text, recv_key='', template='', key_type=''):
        """
        xmlsec encrypt --pubkey-pem pub-userkey.pem
            --session-key aes128-cbc --xml-data doc-plain.xml
            --output doc-encrypted.xml session-key-template.xml

        :param text: Text to encrypt
        :param recv_key: A file containing the receivers public key
        :param template: A file containing the XMLSEC template
        :param key_type: The type of session key to use
        :result: An encrypted XML text
        """
        if not key_type:
            key_type = self.encrypt_key_type
        if not template:
            template = self.template

        return self.crypto.encrypt(text, recv_key, template, key_type)

    def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None):
        """
        Will encrypt an assertion

        :param statement: A XML document that contains the assertion to encrypt
        :param enc_key: File name of a file containing the encryption key
        :param template: A template for the encryption part to be added.
        :param key_type: The type of session key to use.
        :return: The encrypted text
        """
        return self.crypto.encrypt_assertion(
            statement, enc_key, template, key_type, node_xpath)

    def decrypt_keys(self, enctext, keys=None, id_attr=''):
        """ Decrypting an encrypted text by the use of a private key.

        :param enctext: The encrypted text as a string
        :param keys: Keys to try to decrypt enctext with
        :param id_attr: The attribute name for the identifier, normally one of
            'id','Id' or 'ID'
        :return: The decrypted text
        """
        key_files = []

        if not isinstance(keys, list):
            keys = [keys]

        # Drop falsy entries and make sure every key is bytes before
        # writing it to a temporary PEM file.
        keys_filtered = (key for key in keys if key)
        keys_encoded = (
            key.encode("ascii") if not isinstance(key, six.binary_type) else key
            for key in keys_filtered
        )
        key_files = list(
            make_temp(key, decode=False, delete_tmpfiles=self.delete_tmpfiles)
            for key in keys_encoded
        )
        key_file_names = list(tmp.name for tmp in key_files)

        try:
            dectext = self.decrypt(enctext, key_file=key_file_names, id_attr=id_attr)
        except DecryptError as e:
            raise
        else:
            return dectext

    def decrypt(self, enctext, key_file=None, id_attr=''):
        """ Decrypting an encrypted text by the use of a private key.

        Tries the given key file(s) and the configured enc_key_files in
        turn until one of them succeeds.

        :param enctext: The encrypted text as a string
        :param key_file: A key file (or list of key files) to try
        :param id_attr: The attribute name for the identifier
        :return: The decrypted text
        :raises DecryptError: if no key could decrypt the ciphertext
        """
        if not id_attr:
            id_attr = self.id_attr

        if not isinstance(key_file, list):
            key_file = [key_file]

        key_files = [
            key for key in itertools.chain(key_file, self.enc_key_files) if key
        ]
        for key_file in key_files:
            try:
                dectext = self.crypto.decrypt(enctext, key_file, id_attr)
            except XmlsecError as e:
                # Try the next key.
                continue
            else:
                if dectext:
                    return dectext

        errmsg = "No key was able to decrypt the ciphertext. Keys tried: {keys}"
        errmsg = errmsg.format(keys=key_files)
        raise DecryptError(errmsg)

    def verify_signature(self, signedtext, cert_file=None, cert_type='pem', node_name=NODE_NAME, node_id=None, id_attr=''):
        """ Verifies the signature of a XML document.

        :param signedtext: The XML document as a string
        :param cert_file: The public key that was used to sign the document
        :param cert_type: The file type of the certificate
        :param node_name: The name of the class that is signed
        :param node_id: The identifier of the node
        :param id_attr: The attribute name for the identifier, normally one of
            'id','Id' or 'ID'
        :return: Boolean True if the signature was correct otherwise False.
        """
        # This is only for testing purposes, otherwise when would you receive
        # stuff that is signed with your key !?
        if not cert_file:
            cert_file = self.cert_file
            cert_type = self.cert_type

        if not id_attr:
            id_attr = self.id_attr

        return self.crypto.validate_signature(
            signedtext,
            cert_file=cert_file,
            cert_type=cert_type,
            node_name=node_name,
            node_id=node_id,
            id_attr=id_attr)

    def _check_signature(self, decoded_xml, item, node_name=NODE_NAME, origdoc=None, id_attr='', must=False, only_valid_cert=False, issuer=None):
        """Verify the signature on *item*, preferring certificates from
        metadata over certificates embedded in the document.

        Enforces the saml-core enveloped-signature constraints before
        running the cryptographic verification. Returns *item* on
        success; raises MissingKey, SignatureError or CertificateError
        otherwise.
        """
        try:
            _issuer = item.issuer.text.strip()
        except AttributeError:
            _issuer = None

        if _issuer is None:
            try:
                _issuer = issuer.text.strip()
            except AttributeError:
                _issuer = None

        # More trust in certs from metadata then certs in the XML document
        if self.metadata:
            try:
                _certs = self.metadata.certs(_issuer, 'any', 'signing')
            except KeyError:
                _certs = []
            certs = []
            for cert in _certs:
                if isinstance(cert, six.string_types):
                    # Write PEM-formatted cert material to a temp file so
                    # the xmlsec backend can consume it.
                    content = pem_format(cert)
                    tmp = make_temp(content,
                                    suffix=".pem",
                                    decode=False,
                                    delete_tmpfiles=self.delete_tmpfiles)
                    certs.append(tmp)
                else:
                    certs.append(cert)
        else:
            certs = []

        if not certs and not self.only_use_keys_in_metadata:
            logger.debug('==== Certs from instance ====')
            certs = [
                make_temp(content=pem_format(cert),
                          suffix=".pem",
                          decode=False,
                          delete_tmpfiles=self.delete_tmpfiles)
                for cert in cert_from_instance(item)
            ]
        else:
            logger.debug('==== Certs from metadata ==== %s: %s ====', _issuer, certs)

        if not certs:
            raise MissingKey(_issuer)

        # saml-core section "5.4 XML Signature Profile" defines constrains on the
        # xmldsig-core facilities. It explicitly dictates that enveloped signatures
        # are the only signatures allowed. This mean that:
        # * Assertion/RequestType/ResponseType elements must have an ID attribute
        # * signatures must have a single Reference element
        # * the Reference element must have a URI attribute
        # * the URI attribute contains an anchor
        # * the anchor points to the enclosing element's ID attribute
        references = item.signature.signed_info.reference
        signatures_must_have_a_single_reference_element = len(references) == 1
        the_Reference_element_must_have_a_URI_attribute = (
            signatures_must_have_a_single_reference_element
            and hasattr(references[0], "uri")
        )
        the_URI_attribute_contains_an_anchor = (
            the_Reference_element_must_have_a_URI_attribute
            and references[0].uri.startswith("#")
            and len(references[0].uri) > 1
        )
        the_anchor_points_to_the_enclosing_element_ID_attribute = (
            the_URI_attribute_contains_an_anchor
            and references[0].uri == "#{id}".format(id=item.id)
        )

        # Collect the constraint results so a failure report can show
        # exactly which rule was violated.
        validators = {
            "signatures must have a single reference element": (
                signatures_must_have_a_single_reference_element
            ),
            "the Reference element must have a URI attribute": (
                the_Reference_element_must_have_a_URI_attribute
            ),
            "the URI attribute contains an anchor": (
                the_URI_attribute_contains_an_anchor
            ),
            "the anchor points to the enclosing element ID attribute": (
                the_anchor_points_to_the_enclosing_element_ID_attribute
            ),
        }
        if not all(validators.values()):
            error_context = {
                "message": "Signature failed to meet constraints on xmldsig",
                "validators": validators,
                "item ID": item.id,
                "reference URI": item.signature.signed_info.reference[0].uri,
                "issuer": _issuer,
                "node name": node_name,
                "xml document": decoded_xml,
            }
            raise SignatureError(error_context)

        # Try each candidate certificate until one verifies the signature.
        verified = False
        last_pem_file = None
        for pem_fd in certs:
            try:
                last_pem_file = pem_fd.name
                if self.verify_signature(
                        decoded_xml,
                        pem_fd.name,
                        node_name=node_name,
                        node_id=item.id,
                        id_attr=id_attr):
                    verified = True
                    break
            except XmlsecError as exc:
                logger.error('check_sig: %s', exc)
                pass
            except Exception as exc:
                logger.error('check_sig: %s', exc)
                raise

        if verified or only_valid_cert:
            if not self.cert_handler.verify_cert(last_pem_file):
                raise CertificateError('Invalid certificate!')
        else:
            raise SignatureError('Failed to verify signature')

        return item

    def check_signature(self, item, node_name=NODE_NAME, origdoc=None, id_attr='', must=False, issuer=None):
        """
        :param item: Parsed entity
        :param node_name: The name of the node/class/element that is signed
        :param origdoc: The original XML string
        :param id_attr: The attribute name for the identifier, normally one of
            'id','Id' or 'ID'
        :param must: Whether a signature must be present
        :return: The item, if the signature verified
        """
        return self._check_signature(
            origdoc,
            item,
            node_name,
            origdoc,
            id_attr=id_attr,
            must=must,
            issuer=issuer)

    def correctly_signed_message(self, decoded_xml, msgtype, must=False, origdoc=None, only_valid_cert=False):
        """Check if a request is correctly signed, if we have metadata for
        the entity that sent the info use that, if not use the key that are in
        the message if any.

        :param decoded_xml: The SAML message as an XML infoset (a string)
        :param msgtype: SAML protocol message type
        :param must: Whether there must be a signature
        :param origdoc: The original XML string
        :return: The parsed message, if correctly signed (or unsigned and
            not required to be signed)
        """
        # Look up the '<msgtype>_from_string' parser in the saml module
        # first, then in samlp.
        attr = '{type}_from_string'.format(type=msgtype)
        _func = getattr(saml, attr, None)
        _func = getattr(samlp, attr, _func)

        msg = _func(decoded_xml)
        if not msg:
            raise TypeError('Not a {type}'.format(type=msgtype))

        if not msg.signature:
            if must:
                err_msg = 'Required signature missing on {type}'
                err_msg = err_msg.format(type=msgtype)
                raise SignatureError(err_msg)
            else:
                return msg

        return self._check_signature(
            decoded_xml,
            msg,
            class_name(msg),
            origdoc,
            must=must,
            only_valid_cert=only_valid_cert)

    # Thin convenience wrappers around correctly_signed_message(), one per
    # SAML message type.
    def correctly_signed_authn_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authn_request', must, origdoc, only_valid_cert=only_valid_cert)

    def correctly_signed_authn_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authn_query', must, origdoc, only_valid_cert)

    def correctly_signed_logout_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'logout_request', must, origdoc, only_valid_cert)

    def correctly_signed_logout_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'logout_response', must, origdoc, only_valid_cert)

    def correctly_signed_attribute_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'attribute_query', must, origdoc, only_valid_cert)

    def correctly_signed_authz_decision_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authz_decision_query', must, origdoc, only_valid_cert)

    def correctly_signed_authz_decision_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'authz_decision_response', must, origdoc, only_valid_cert)

    def correctly_signed_name_id_mapping_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'name_id_mapping_request', must, origdoc, only_valid_cert)

    def correctly_signed_name_id_mapping_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'name_id_mapping_response', must, origdoc, only_valid_cert)

    def correctly_signed_artifact_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'artifact_request', must, origdoc, only_valid_cert)

    def correctly_signed_artifact_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'artifact_response', must, origdoc, only_valid_cert)

    def correctly_signed_manage_name_id_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'manage_name_id_request', must, origdoc, only_valid_cert)

    def correctly_signed_manage_name_id_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'manage_name_id_response', must, origdoc, only_valid_cert)

    def correctly_signed_assertion_id_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        return self.correctly_signed_message(decoded_xml, 'assertion_id_request', must, origdoc, only_valid_cert)

    def correctly_signed_assertion_id_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
        # NOTE: intentionally parses as 'assertion', matching the payload of
        # an assertion-ID response.
        return self.correctly_signed_message(decoded_xml, 'assertion', must, origdoc, only_valid_cert)

    def correctly_signed_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, require_response_signature=False, **kwargs):
        """ Check if a instance is correctly signed, if we have metadata for
        the IdP that sent the info use that, if not use the key that are in
        the message if any.

        :param decoded_xml: The SAML message as a XML string
        :param must: Whether there must be a signature
        :param origdoc: The original XML string
        :param only_valid_cert:
        :param require_response_signature: Whether the Response element itself
            must be signed
        :return: None if the signature can not be verified otherwise an instance
        """
        response = samlp.any_response_from_string(decoded_xml)
        if not response:
            raise TypeError('Not a Response')

        if response.signature:
            if 'do_not_verify' in kwargs:
                pass
            else:
                self._check_signature(decoded_xml, response,
                                      class_name(response), origdoc)
        elif require_response_signature:
            raise SignatureError('Signature missing for response')

        return response

    def sign_statement_using_xmlsec(self, statement, **kwargs):
        """ Deprecated function. See sign_statement(). """
        return self.sign_statement(statement, **kwargs)

    def sign_statement(self, statement, node_name, key=None, key_file=None, node_id=None, id_attr=''):
        """Sign a SAML statement.

        :param statement: The statement to be signed
        :param node_name: string like 'urn:oasis:names:...:Assertion'
        :param key: The key to be used for the signing, either this or
        :param key_file: The file where the key can be found
        :param node_id:
        :param id_attr: The attribute name for the identifier, normally one of
            'id','Id' or 'ID'
        :return: The signed statement
        """
        if not id_attr:
            id_attr = self.id_attr

        # A key given as a string/object is written to a temp PEM file so
        # the crypto backend can read it.
        if not key_file and key:
            content = str(key).encode()
            tmp = make_temp(content, suffix=".pem", delete_tmpfiles=self.delete_tmpfiles)
            key_file = tmp.name

        if not key and not key_file:
            key_file = self.key_file

        return self.crypto.sign_statement(
            statement,
            node_name,
            key_file,
            node_id,
            id_attr)

    def sign_assertion_using_xmlsec(self, statement, **kwargs):
        """ Deprecated function. See sign_assertion(). """
        return self.sign_statement(
            statement, class_name(saml.Assertion()), **kwargs)

    def sign_assertion(self, statement, **kwargs):
        """Sign a SAML assertion.

        See sign_statement() for the kwargs.

        :param statement: The statement to be signed
        :return: The signed statement
        """
        return self.sign_statement(
            statement, class_name(saml.Assertion()), **kwargs)

    def sign_attribute_query_using_xmlsec(self, statement, **kwargs):
        """ Deprecated function. See sign_attribute_query(). """
        return self.sign_attribute_query(statement, **kwargs)

    def sign_attribute_query(self, statement, **kwargs):
        """Sign a SAML attribute query.

        See sign_statement() for the kwargs.

        :param statement: The statement to be signed
        :return: The signed statement
        """
        return self.sign_statement(
            statement, class_name(samlp.AttributeQuery()), **kwargs)

    def multiple_signatures(self, statement, to_sign, key=None, key_file=None, sign_alg=None, digest_alg=None):
        """
        Sign multiple parts of a statement

        :param statement: The statement that should be sign, this is XML text
        :param to_sign: A list of (items, id, id attribute name) tuples that
            specifies what to sign
        :param key: A key that should be used for doing the signing
        :param key_file: A file that contains the key to be used
        :return: A possibly multiple signed statement
        """
        for (item, sid, id_attr) in to_sign:
            if not sid:
                if not item.id:
                    # NOTE(review): 'sid' is the (falsy) loop variable here,
                    # shadowing what looks like a module-level sid() id
                    # generator; calling sid() on this path would raise
                    # TypeError. Confirm against the module imports.
                    sid = item.id = sid()
                else:
                    sid = item.id

            if not item.signature:
                item.signature = pre_signature_part(
                    sid,
                    self.cert_file,
                    sign_alg=sign_alg,
                    digest_alg=digest_alg)

            statement = self.sign_statement(
                statement,
                class_name(item),
                key=key,
                key_file=key_file,
                node_id=sid,
                id_attr=id_attr)
        return statement
def pre_signature_part(ident, public_key=None, identifier=None, digest_alg=None, sign_alg=None):
    """
    If an assertion is to be signed the signature part has to be preset
    with which algorithms to be used, this function returns such a
    preset part.

    :param ident: The identifier of the assertion, so you know which assertion
        was signed
    :param public_key: The base64 part of a PEM file
    :param identifier: Optional suffix for the Signature element's id
    :param digest_alg: Digest algorithm URI; the default digest is used
        when not given
    :param sign_alg: Signature algorithm URI; the default signature
        algorithm is used when not given
    :return: A preset signature part
    """
    digest_alg = digest_alg or ds.DefaultSignature().get_digest_alg()
    sign_alg = sign_alg or ds.DefaultSignature().get_sign_alg()

    # Enveloped-signature transform chain plus exclusive canonicalization.
    reference = ds.Reference(
        uri='#{id}'.format(id=ident),
        digest_value=ds.DigestValue(),
        transforms=ds.Transforms(transform=[
            ds.Transform(algorithm=ds.TRANSFORM_ENVELOPED),
            ds.Transform(algorithm=ds.ALG_EXC_C14N),
        ]),
        digest_method=ds.DigestMethod(algorithm=digest_alg))

    signed_info = ds.SignedInfo(
        signature_method=ds.SignatureMethod(algorithm=sign_alg),
        canonicalization_method=ds.CanonicalizationMethod(
            algorithm=ds.ALG_EXC_C14N),
        reference=reference)

    signature = ds.Signature(
        signed_info=signed_info,
        signature_value=ds.SignatureValue())

    if identifier:
        signature.id = 'Signature{n}'.format(n=identifier)

    if public_key:
        signature.key_info = ds.KeyInfo(
            x509_data=ds.X509Data(
                x509_certificate=[ds.X509Certificate(text=public_key)]))

    return signature
# <?xml version="1.0" encoding="UTF-8"?>
# <EncryptedData Id="ED" Type="http://www.w3.org/2001/04/xmlenc#Element"
# xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#tripledes-cbc"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <EncryptedKey Id="EK" xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#rsa-1_5"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <ds:KeyName>my-rsa-key</ds:KeyName>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# <ReferenceList>
# <DataReference URI="#ED"/>
# </ReferenceList>
# </EncryptedKey>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# </EncryptedData>
def pre_encryption_part(msg_enc=TRIPLE_DES_CBC, key_enc=RSA_1_5, key_name='my-rsa-key'):
    """
    Build the EncryptedData/EncryptedKey template that the actual XML
    encryption step fills in later (cf. the XML example in the comment
    block above this function).

    The CipherValue elements are created empty on purpose; the encryption
    tool replaces their text with the real ciphertext.

    :param msg_enc: Algorithm URI used to encrypt the message payload.
        NOTE(review): the default (3DES-CBC) is a legacy algorithm —
        confirm whether callers should prefer an AES URI.
    :param key_enc: Algorithm URI used to wrap the content-encryption key.
        NOTE(review): RSA-1.5 key transport is likewise legacy.
    :param key_name: Name by which the recipient looks up its RSA key.
    :return: An EncryptedData template element.
    """
    msg_encryption_method = EncryptionMethod(algorithm=msg_enc)
    key_encryption_method = EncryptionMethod(algorithm=key_enc)
    # The EncryptedKey carries the (to-be-filled) session key, wrapped with
    # the recipient's named RSA key.
    encrypted_key = EncryptedKey(
        id='EK',
        encryption_method=key_encryption_method,
        key_info=ds.KeyInfo(
            key_name=ds.KeyName(text=key_name)),
        cipher_data=CipherData(
            cipher_value=CipherValue(text='')))
    key_info = ds.KeyInfo(encrypted_key=encrypted_key)
    encrypted_data = EncryptedData(
        id='ED',
        type='http://www.w3.org/2001/04/xmlenc#Element',
        encryption_method=msg_encryption_method,
        key_info=key_info,
        cipher_data=CipherData(cipher_value=CipherValue(text='')))
    return encrypted_data
def pre_encrypt_assertion(response):
    """
    Relocate the assertion(s) of a response into a fresh encrypted_assertion.

    :param response: The response carrying one assertion (or a list of them)
    :return: The same response, with ``assertion`` cleared and its previous
        content attached to ``response.encrypted_assertion`` as extension
        element(s).
    """
    moved = response.assertion
    response.assertion = None
    container = EncryptedAssertion()
    response.encrypted_assertion = container
    if moved is None:
        return response
    if isinstance(moved, list):
        container.add_extension_elements(moved)
    else:
        container.add_extension_element(moved)
    return response
def response_factory(sign=False, encrypt=False, sign_alg=None, digest_alg=None,
                     **kwargs):
    """
    Create a bare samlp.Response, optionally with a preset signature part.

    :param sign: If True, attach a signature template (requires ``id`` in
        kwargs; a missing ``id`` raises KeyError)
    :param encrypt: Accepted for API symmetry; encryption is not implemented
    :param sign_alg: Signature algorithm passed on to pre_signature_part
    :param digest_alg: Digest algorithm passed on to pre_signature_part
    :param kwargs: Extra attributes set verbatim on the response
    :return: A samlp.Response instance
    """
    resp = samlp.Response(id=sid(), version=VERSION, issue_instant=instant())
    if sign:
        resp.signature = pre_signature_part(kwargs['id'], sign_alg=sign_alg,
                                            digest_alg=digest_alg)
    if encrypt:
        # NOTE(review): intentionally a no-op — encryption at this stage is
        # not implemented; the flag is kept for interface compatibility.
        pass
    for attr, value in kwargs.items():
        setattr(resp, attr, value)
    return resp
if __name__ == '__main__':
    # Tiny CLI helper: ``-s`` prints the names of the implemented signature
    # algorithms, one per line.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--list-sigalgs', dest='listsigalgs',
                        action='store_true',
                        help='List implemented signature algorithms')
    args = parser.parse_args()
    if args.listsigalgs:
        # Iterating a dict yields its keys directly; the previous
        # ``[key for key, value in SIGNER_ALGS.items()]`` built a throwaway
        # list and ignored the values.
        print('\n'.join(SIGNER_ALGS))
| ./CrossVul/dataset_final_sorted/CWE-347/py/good_4590_0 |
crossvul-python_data_good_4360_1 | import logging
import os.path
import warnings
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
from oic import rndstr
from oic.exception import AuthzError
from oic.exception import MessageException
from oic.exception import NotForMe
from oic.exception import PyoidcError
from oic.oauth2 import Grant
from oic.oauth2.consumer import TokenError
from oic.oauth2.consumer import UnknownState
from oic.oauth2.consumer import stateID
from oic.oauth2.message import ErrorResponse
from oic.oic import ENDPOINTS
from oic.oic import Client
from oic.oic.message import AccessTokenResponse
from oic.oic.message import AuthorizationRequest
from oic.oic.message import AuthorizationResponse
from oic.oic.message import BackChannelLogoutRequest
from oic.oic.message import Claims
from oic.oic.message import ClaimsRequest
from oic.oic.message import IdToken
from oic.utils import http_util
from oic.utils.sanitize import sanitize
from oic.utils.sdb import DictSessionBackend
from oic.utils.sdb import SessionBackend
from oic.utils.sdb import session_extended_get
from oic.utils.sdb import session_get
from oic.utils.sdb import session_update
__author__ = "rohe0002"
logger = logging.getLogger(__name__)
def factory(kaka, sdb, config):
    """
    Build a Consumer restored from the session referenced by a cookie.

    :param kaka: The raw cookie
    :param sdb: The session database
    :param config: The common Consumer configuration
    :return: A restored Consumer instance, or None when the cookie does not
        carry one of our sessions
    """
    cookie_name = config["name"]
    parts = http_util.cookie_parts(cookie_name, kaka)
    if parts is None:
        return None
    consumer = Consumer(sdb, config)
    consumer.restore(parts[0])
    http_util.parse_cookie(cookie_name, consumer.seed, kaka)
    return consumer
def build_userinfo_claims(claims, sformat="signed", locale="us-en"):
    """
    Create userinfo request based on claims.

    :param claims: Mapping of claim name -> claim request (dict or None)
    :param sformat: Requested response format, forwarded as ``format``
    :param locale: NOTE(review): accepted but never read by this function —
        kept only for backwards compatibility with existing callers.
    :return: A Claims instance

    config example::
        "userinfo":{
            "name": {"essential": true},
            "nickname": null,
            "email": {"essential": true},
            "email_verified": {"essential": true},
            "picture": null
        }
    """
    return Claims(format=sformat, **claims)
def clean_response(aresp):
    """
    Copy only the standard attributes into a fresh AccessTokenResponse.

    :param aresp: The original AccessTokenResponse
    :return: A new AccessTokenResponse carrying just the standard parameters
    """
    cleaned = AccessTokenResponse()
    for param in cleaned.parameters():
        try:
            value = aresp[param]
        except KeyError:
            # Parameter absent in the original response; skip it.
            continue
        cleaned[param] = value
    return cleaned
# Instance attributes that hold runtime-only machinery (backends, hooks,
# class references) and are therefore excluded by Consumer.dictionary()
# when instance state is serialized into the session database.
IGNORE = [
    "request2endpoint",
    "response2error",
    "grant_class",
    "token_class",
    "sdb",
    "wf",
    "events",
    "message_factory",
]
# Client preference attribute names that a Consumer may announce to the
# provider (e.g. during dynamic client registration).
CONSUMER_PREF_ARGS = [
    "token_endpoint_auth_method",
    "subject_type",
    "require_signed_request_object",
    "userinfo_signed_response_algs",
    "userinfo_encrypted_response_alg",
    "userinfo_encrypted_response_enc",
    "userinfo_encrypted_response_int",
    "id_token_signed_response_algs",
    "id_token_encrypted_response_alg",
    "id_token_encrypted_response_enc",
    "id_token_encrypted_response_int",
    "request_object_signing_alg",
    "request_object_encryption_alg",
    "request_object_encryption_enc",
    "default_max_age",
    "require_auth_time",
    "default_acr_values",
]
class Consumer(Client):
    """An OpenID Connect consumer implementation."""
    def __init__(
        self,
        session_db,
        consumer_config,
        client_config=None,
        server_info=None,
        debug=False,
        client_prefs=None,
        sso_db=None,
    ):
        """
        Initialize a Consumer instance.
        :param session_db: Where info are kept about sessions
        :param config: Configuration of the consumer
        :param client_config: Client configuration
        :param server_info: Information about the server
        :param client_prefs: Run time preferences, which are chosen depends
            on what the server can do.
        """
        if client_config is None:
            client_config = {}
        Client.__init__(self, **client_config)
        self.consumer_config = consumer_config
        if consumer_config:
            try:
                self.debug = consumer_config["debug"]
            except KeyError:
                self.debug = 0
        if server_info:
            # Copy the known endpoint URLs from the server description;
            # missing ones are set to the empty string.
            for endpoint in ENDPOINTS:
                try:
                    setattr(self, endpoint, server_info[endpoint])
                except KeyError:
                    setattr(self, endpoint, "")
        if not isinstance(session_db, SessionBackend):
            warnings.warn(
                "Please use `SessionBackend` to ensure proper API for the database.",
                DeprecationWarning,
            )
        self.sdb = session_db
        if sso_db is not None:
            if not isinstance(sso_db, SessionBackend):
                warnings.warn(
                    "Please use `SessionBackend` to ensure proper API for the database.",
                    DeprecationWarning,
                )
            self.sso_db: SessionBackend = sso_db
        else:
            self.sso_db = DictSessionBackend()
        # NOTE(review): this unconditionally overwrites any value read from
        # consumer_config["debug"] above — confirm that is intended.
        self.debug = debug
        self.seed = ""
        self.nonce = ""
        self.request_filename = ""
        self.request_uri = ""
        self.user_info = None
        self.registration_expires_at = 0
        self.secret_type = "Bearer"
    def update(self, sid):
        """
        Update the instance variables from something stored in the session database.
        Will not overwrite something that's already there.
        Except for the grant dictionary !!
        :param sid: Session identifier
        """
        for key, val in self.sdb[sid].items():
            try:
                _val = getattr(self, key)
            except AttributeError:
                # Only attributes that already exist on the instance are
                # considered for updating.
                continue
            if not _val and val:
                setattr(self, key, val)
            elif key == "grant" and val:
                # val is a Grant instance
                val.update(_val)
                setattr(self, key, val)
    def restore(self, sid):
        """
        Restore the instance variables from something stored in the session database.
        :param sid: Session identifier
        """
        for key, val in self.sdb[sid].items():
            setattr(self, key, val)
    def dictionary(self):
        # Serializable view of the instance state; runtime-only attributes
        # listed in IGNORE are excluded.
        return dict([(k, v) for k, v in self.__dict__.items() if k not in IGNORE])
    def _backup(self, sid):
        """
        Store instance variable values in the session store under a session identifier.
        :param sid: Session identifier
        """
        self.sdb[sid] = self.dictionary()
    def begin(self, scope="", response_type="", use_nonce=False, path="", **kwargs):
        """
        Begin the OIDC flow.
        :param scope: Defines which user info claims is wanted
        :param response_type: Controls the parameters returned in the response from the Authorization Endpoint
        :param use_nonce: If not implicit flow nonce is optional. This defines if it should be used anyway.
        :param path: The path part of the redirect URL
        :return: A 2-tuple, session identifier and URL to which the user should be redirected
        """
        _log_info = logger.info
        if self.debug:
            _log_info("- begin -")
        # Compose the redirect URI from 'path' and the configured authz page,
        # normalizing the '/' between the two parts.
        _page = self.consumer_config["authz_page"]
        if not path.endswith("/"):
            if _page.startswith("/"):
                self.redirect_uris = [path + _page]
            else:
                self.redirect_uris = ["%s/%s" % (path, _page)]
        else:
            if _page.startswith("/"):
                self.redirect_uris = [path + _page[1:]]
            else:
                self.redirect_uris = ["%s/%s" % (path, _page)]
        # Put myself in the dictionary of sessions, keyed on session-id
        if not self.seed:
            self.seed = rndstr()
        if not scope:
            scope = self.consumer_config["scope"]
        if not response_type:
            response_type = self.consumer_config["response_type"]
        sid = stateID(path, self.seed)
        self.grant[sid] = Grant(seed=self.seed)
        self._backup(sid)
        self.sdb["seed:%s" % self.seed] = sid
        self.sso_db[sid] = {}
        args = {
            "client_id": self.client_id,
            "state": sid,
            "response_type": response_type,
            "scope": scope,
        }
        # nonce is REQUIRED in implicit flow,
        # OPTIONAL on code flow.
        if "token" in response_type or use_nonce:
            args["nonce"] = rndstr(12)
            self.state2nonce[sid] = args["nonce"]
        if "max_age" in self.consumer_config:
            args["max_age"] = self.consumer_config["max_age"]
        _claims = None
        if "user_info" in self.consumer_config:
            _claims = ClaimsRequest(
                userinfo=Claims(**self.consumer_config["user_info"])
            )
        if "id_token" in self.consumer_config:
            if _claims:
                _claims["id_token"] = Claims(**self.consumer_config["id_token"])
            else:
                _claims = ClaimsRequest(
                    id_token=Claims(**self.consumer_config["id_token"])
                )
        if _claims:
            args["claims"] = _claims
        if "request_method" in self.consumer_config:
            areq = self.construct_AuthorizationRequest(
                request_args=args, extra_args=None, request_param="request"
            )
            if self.consumer_config["request_method"] == "file":
                # Serve the request object by reference: write it to a
                # uniquely named file under temp_dir and point request_uri
                # at the corresponding web path.
                id_request = areq["request"]
                del areq["request"]
                _filedir = self.consumer_config["temp_dir"]
                _webpath = self.consumer_config["temp_path"]
                _name = rndstr(10)
                filename = os.path.join(_filedir, _name)
                while os.path.exists(filename):
                    _name = rndstr(10)
                    filename = os.path.join(_filedir, _name)
                fid = open(filename, mode="w")
                fid.write(id_request)
                fid.close()
                _webname = "%s%s/%s" % (path, _webpath, _name)
                areq["request_uri"] = _webname
                self.request_uri = _webname
                self._backup(sid)
        else:
            if "userinfo_claims" in args:  # can only be carried in an IDRequest
                raise PyoidcError("Need a request method")
            areq = self.construct_AuthorizationRequest(
                AuthorizationRequest, request_args=args
            )
        location = areq.request(self.authorization_endpoint)
        if self.debug:
            _log_info("Redirecting to: %s" % location)
        self.authz_req[areq["state"]] = areq
        return sid, location
    def _parse_authz(self, query="", **kwargs):
        """
        Parse a (possibly error) authorization response and restore session.

        :param query: The urlencoded response
        :return: A 2-tuple (AuthorizationResponse, state)
        :raises AuthzError: If the provider returned an error response
        :raises UnknownState: If the state does not map to a known session
        """
        _log_info = logger.info
        # Might be an error response
        _log_info("Expect Authorization Response")
        aresp = self.parse_response(
            AuthorizationResponse, info=query, sformat="urlencoded", keyjar=self.keyjar
        )
        if isinstance(aresp, ErrorResponse):
            _log_info("ErrorResponse: %s" % sanitize(aresp))
            raise AuthzError(aresp.get("error"), aresp)
        _log_info("Aresp: %s" % sanitize(aresp))
        _state = aresp["state"]
        try:
            self.update(_state)
        except KeyError:
            raise UnknownState(_state, aresp)
        self.redirect_uris = [self.sdb[_state]["redirect_uris"]]
        return aresp, _state
    def parse_authz(
        self, query="", **kwargs
    ) -> Union[
        http_util.BadRequest,
        Tuple[
            Optional[AuthorizationResponse],
            Optional[AccessTokenResponse],
            Optional[IdToken],
        ],
    ]:
        """
        Parse authorization response from server.
        Couple of cases
        ["code"]
        ["code", "token"]
        ["code", "id_token", "token"]
        ["id_token"]
        ["id_token", "token"]
        ["token"]
        """
        _log_info = logger.info
        logger.debug("- authorization -")
        # FIXME: This shouldn't be here... We should rather raise a sepcific Client error
        # That would simplify the return value of this function
        # and drop bunch of assertions from tests added in this commit.
        if not query:
            return http_util.BadRequest("Missing query")
        _log_info("response: %s" % sanitize(query))
        if "algs" not in kwargs:
            kwargs["algs"] = self.sign_enc_algs("id_token")
        if "code" in self.consumer_config["response_type"]:
            aresp, _state = self._parse_authz(query, **kwargs)
            # May have token and id_token information too
            if "access_token" in aresp:
                atr = clean_response(aresp)
                self.access_token = atr
                # update the grant object
                self.get_grant(state=_state).add_token(atr)
            else:
                atr = None
            self._backup(_state)
            try:
                idt = aresp["id_token"]
            except KeyError:
                idt = None
            else:
                try:
                    session_update(self.sdb, idt["sid"], "smid", _state)
                except KeyError:
                    pass
        elif "token" in self.consumer_config["response_type"]:  # implicit flow
            _log_info("Expect Access Token Response")
            aresp = None
            _state = None
            atr = self.parse_response(
                AccessTokenResponse,
                info=query,
                sformat="urlencoded",
                keyjar=self.keyjar,
                **kwargs,
            )
            if isinstance(atr, ErrorResponse):
                raise TokenError(atr.get("error"), atr)
            idt = atr.get("id_token")
        else:  # only id_token
            aresp, _state = self._parse_authz(query, **kwargs)
            try:
                idt = aresp["id_token"]
            except KeyError:
                idt = None
            else:
                try:
                    session_update(self.sso_db, _state, "smid", idt["sid"])
                except KeyError:
                    pass
            # Null the aresp as only id_token should be returned
            aresp = atr = None
        # Verify the IdToken if it was present
        if idt is not None:
            self.verify_id_token(idt, self.authz_req.get(_state or atr["state"]))
        return aresp, atr, idt
    def complete(self, state):
        """
        Do the access token request, the last step in a code flow.
        If Implicit flow was used then this method is never used.
        """
        args = {"redirect_uri": self.redirect_uris[0]}
        if "password" in self.consumer_config and self.consumer_config["password"]:
            logger.info("basic auth")
            http_args = {"password": self.consumer_config["password"]}
        elif self.client_secret:
            logger.info("request_body auth")
            http_args = {}
            args.update(
                {
                    "client_secret": self.client_secret,
                    "client_id": self.client_id,
                    "secret_type": self.secret_type,
                }
            )
        else:
            raise PyoidcError("Nothing to authenticate with")
        resp = self.do_access_token_request(
            state=state, request_args=args, http_args=http_args
        )
        logger.info("Access Token Response: %s" % sanitize(resp))
        if resp.type() == "ErrorResponse":
            raise TokenError(resp.error, resp)
        self._backup(state)
        return resp
    def refresh_token(self):
        # TODO(review): not implemented.
        pass
    def get_user_info(self, state):
        """
        Fetch user info from the userinfo endpoint for a session.

        :param state: Session identifier
        :return: The userinfo response
        :raises TokenError: If the provider returned an error response
        """
        uinfo = self.do_user_info_request(state=state, schema="openid")
        if uinfo.type() == "ErrorResponse":
            raise TokenError(uinfo.error, uinfo)
        self.user_info = uinfo
        self._backup(state)
        return uinfo
    def refresh_session(self):
        # TODO(review): not implemented.
        pass
    def check_session(self):
        """
        Check session endpoint.
        With python you could use PyQuery to get the onclick attribute of each
        anchor tag, parse that with a regular expression to get the placeId,
        build the /places/duplicates.jsp?inPID= URL yourself, use requests to
        load the content at that URL, then PyQuery again on the content to get
        the data you need.
        for iframe in mosoup("iframe"):
            mosoup.iframe.extract()
        It accepts postMessage requests from the relevant RP iframe and uses
        postMessage to post back the login status of the End-User at the OP.
        :return:
        """
        pass
    def end_session(self):
        # TODO(review): not implemented.
        pass
    # LOGOUT related
    def backchannel_logout(
        self, request: Optional[str] = None, request_args: Optional[Dict] = None
    ) -> str:
        """
        Receives a back channel logout request.
        :param request: A urlencoded request
        :param request_args: The request as a dictionary
        :return: A Session Identifier
        """
        if request:
            req = BackChannelLogoutRequest().from_urlencoded(request)
        elif request_args is not None:
            req = BackChannelLogoutRequest(**request_args)
        else:
            raise ValueError("Missing request specification")
        # The logout token must be meant for us (aud), come from the expected
        # issuer, and carry a verifiable signature.
        kwargs = {"aud": self.client_id, "iss": self.issuer, "keyjar": self.keyjar}
        try:
            req.verify(**kwargs)
        except (MessageException, ValueError, NotForMe) as err:
            raise MessageException("Bogus logout request: {}".format(err))
        # Find the subject through 'sid' or 'sub'
        try:
            sub = req["logout_token"]["sub"]
        except KeyError:
            # verify has guaranteed that there will be a sid if sub is missing
            sm_id = req["logout_token"]["sid"]
            _sid = session_get(self.sso_db, "smid", sm_id)
        else:
            _sid = session_extended_get(
                self.sso_db, sub, "issuer", req["logout_token"]["iss"]
            )
        return _sid
| ./CrossVul/dataset_final_sorted/CWE-347/py/good_4360_1 |
crossvul-python_data_good_4085_2 | #!/usr/bin/env python
# Copyright 2012 - 2017, New York University and the TUF contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
"""
<Program Name>
sig.py
<Author>
Vladimir Diaz <vladimir.v.diaz@gmail.com>
<Started>
February 28, 2012. Based on a previous version by Geremy Condra.
<Copyright>
See LICENSE-MIT OR LICENSE for licensing information.
<Purpose>
Survivable key compromise is one feature of a secure update system
incorporated into TUF's design. Responsibility separation through
the use of multiple roles, multi-signature trust, and explicit and
implicit key revocation are some of the mechanisms employed towards
this goal of survivability. These mechanisms can all be seen in
play by the functions available in this module.
The signed metadata files utilized by TUF to download target files
securely are used and represented here as the 'signable' object.
More precisely, the signature structures contained within these metadata
files are packaged into 'signable' dictionaries. This module makes it
possible to capture the states of these signatures by organizing the
keys into different categories. As keys are added and removed, the
system must securely and efficiently verify the status of these signatures.
For instance, a bunch of keys have recently expired. How many valid keys
are now available to the Snapshot role? This question can be answered by
get_signature_status(), which will return a full 'status report' of these
'signable' dicts. This module also provides a convenient verify() function
that will determine if a role still has a sufficient number of valid keys.
If a caller needs to update the signatures of a 'signable' object, there
is also a function for that.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import tuf
import tuf.keydb
import tuf.roledb
import tuf.formats
import securesystemslib
# Module-level logger; see 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.sig')
# Disable 'iso8601' logger messages to prevent 'iso8601' from clogging the
# log file.
iso8601_logger = logging.getLogger('iso8601')
iso8601_logger.disabled = True
def get_signature_status(signable, role=None, repository_name='default',
    threshold=None, keyids=None):
  """
  <Purpose>
    Return a dictionary representing the status of the signatures listed in
    'signable'.  Signatures in the returned dictionary are identified by the
    signature keyid and can have a status of either:
    * bad -- Invalid signature
    * good -- Valid signature from key that is available in 'tuf.keydb', and is
      authorized for the passed role as per 'tuf.roledb' (authorization may be
      overwritten by passed 'keyids').
    * unknown -- Signature from key that is not available in 'tuf.keydb', or if
      'role' is None.
    * unknown signing schemes -- Signature from key with unknown signing
      scheme.
    * untrusted -- Valid signature from key that is available in 'tuf.keydb',
      but is not trusted for the passed role as per 'tuf.roledb' or the passed
      'keyids'.
    NOTE: The result may contain duplicate keyids or keyids that reference the
    same key, if 'signable' lists multiple signatures from the same key.
  <Arguments>
    signable:
      A dictionary containing a list of signatures and a 'signed' identifier.
      signable = {'signed': 'signer',
                  'signatures': [{'keyid': keyid,
                                  'sig': sig}]}
      Conformant to tuf.formats.SIGNABLE_SCHEMA.
    role:
      TUF role string (e.g. 'root', 'targets', 'snapshot' or timestamp).
    threshold:
      Rather than reference the role's threshold as set in tuf.roledb.py, use
      the given 'threshold' to calculate the signature status of 'signable'.
      'threshold' is an integer value that sets the role's threshold value, or
      the minimum number of signatures needed for metadata to be considered
      fully signed.
    keyids:
      Similar to the 'threshold' argument, use the supplied list of 'keyids'
      to calculate the signature status, instead of referencing the keyids
      in tuf.roledb.py for 'role'.
  <Exceptions>
    securesystemslib.exceptions.FormatError, if 'signable' does not have the
    correct format.
    tuf.exceptions.UnknownRoleError, if 'role' is not recognized.
  <Side Effects>
    None.
  <Returns>
    A dictionary representing the status of the signatures in 'signable'.
    Conformant to tuf.formats.SIGNATURESTATUS_SCHEMA.
  """
  # Do the arguments have the correct format?  This check will ensure that
  # arguments have the appropriate number of objects and object types, and that
  # all dict keys are properly named.  Raise
  # 'securesystemslib.exceptions.FormatError' if the check fails.
  tuf.formats.SIGNABLE_SCHEMA.check_match(signable)
  securesystemslib.formats.NAME_SCHEMA.check_match(repository_name)
  if role is not None:
    tuf.formats.ROLENAME_SCHEMA.check_match(role)
  if threshold is not None:
    tuf.formats.THRESHOLD_SCHEMA.check_match(threshold)
  if keyids is not None:
    securesystemslib.formats.KEYIDS_SCHEMA.check_match(keyids)
  # The signature status dictionary returned.
  signature_status = {}
  good_sigs = []
  bad_sigs = []
  unknown_sigs = []
  untrusted_sigs = []
  unknown_signing_schemes = []
  # Extract the relevant fields from 'signable' that will allow us to identify
  # the different classes of keys (i.e., good_sigs, bad_sigs, etc.).
  signed = securesystemslib.formats.encode_canonical(signable['signed']).encode('utf-8')
  signatures = signable['signatures']
  # Iterate the signatures and enumerate the signature_status fields.
  # (i.e., good_sigs, bad_sigs, etc.).
  for signature in signatures:
    keyid = signature['keyid']
    # Does the signature use an unrecognized key?
    try:
      key = tuf.keydb.get_key(keyid, repository_name)
    except tuf.exceptions.UnknownKeyError:
      unknown_sigs.append(keyid)
      continue
    # Does the signature use an unknown/unsupported signing scheme?
    try:
      valid_sig = securesystemslib.keys.verify_signature(key, signature, signed)
    except securesystemslib.exceptions.UnsupportedAlgorithmError:
      unknown_signing_schemes.append(keyid)
      continue
    # We are now dealing with either a trusted or untrusted key...
    if valid_sig:
      if role is not None:
        # Is this an unauthorized key? (a keyid associated with 'role')
        # Note that if the role is not known, tuf.exceptions.UnknownRoleError
        # is raised here.
        # The roledb lookup happens at most once: its result is cached in
        # 'keyids' for the remaining loop iterations.
        if keyids is None:
          keyids = tuf.roledb.get_role_keyids(role, repository_name)
        if keyid not in keyids:
          untrusted_sigs.append(keyid)
          continue
      # This is an unset role, thus an unknown signature.
      else:
        unknown_sigs.append(keyid)
        continue
      # Identify good/authorized key.
      good_sigs.append(keyid)
    else:
      # This is a bad signature for a trusted key.
      bad_sigs.append(keyid)
  # Retrieve the threshold value for 'role'.  Raise
  # tuf.exceptions.UnknownRoleError if we were given an invalid role.
  if role is not None:
    if threshold is None:
      # Note that if the role is not known, tuf.exceptions.UnknownRoleError is
      # raised here.
      threshold = tuf.roledb.get_role_threshold(
          role, repository_name=repository_name)
    else:
      logger.debug('Not using roledb.py\'s threshold for ' + repr(role))
  else:
    # No role given: no threshold can be meaningful, report 0.
    threshold = 0
  # Build the signature_status dict.
  signature_status['threshold'] = threshold
  signature_status['good_sigs'] = good_sigs
  signature_status['bad_sigs'] = bad_sigs
  signature_status['unknown_sigs'] = unknown_sigs
  signature_status['untrusted_sigs'] = untrusted_sigs
  signature_status['unknown_signing_schemes'] = unknown_signing_schemes
  return signature_status
def verify(signable, role, repository_name='default', threshold=None,
    keyids=None):
  """
  <Purpose>
    Verify that 'signable' has a valid threshold of authorized signatures
    identified by unique keyids.  The threshold and whether a keyid is
    authorized is determined by querying the 'threshold' and 'keyids' info for
    the passed 'role' in 'tuf.roledb'.  Both values can be overwritten by
    passing the 'threshold' or 'keyids' arguments.
    NOTE:
    - Signatures with identical authorized keyids only count towards the
      threshold once.
    - Signatures with different authorized keyids each count towards the
      threshold, even if the keyids identify the same key.
  <Arguments>
    signable:
      A dictionary containing a list of signatures and a 'signed' identifier
      that conforms to SIGNABLE_SCHEMA, e.g.:
      signable = {'signed':, 'signatures': [{'keyid':, 'method':, 'sig':}]}
    role:
      TUF role string (e.g. 'root', 'targets', 'snapshot' or timestamp).
    threshold:
      Rather than reference the role's threshold as set in tuf.roledb.py, use
      the given 'threshold' to calculate the signature status of 'signable'.
      'threshold' is an integer value that sets the role's threshold value, or
      the minimum number of signatures needed for metadata to be considered
      fully signed.
    keyids:
      Similar to the 'threshold' argument, use the supplied list of 'keyids'
      to calculate the signature status, instead of referencing the keyids
      in tuf.roledb.py for 'role'.
  <Exceptions>
    tuf.exceptions.UnknownRoleError, if 'role' is not recognized.
    securesystemslib.exceptions.FormatError, if 'signable' is not formatted
    correctly.
    securesystemslib.exceptions.Error, if an invalid threshold is encountered.
  <Side Effects>
    tuf.sig.get_signature_status() called.  Any exceptions thrown by
    get_signature_status() will be caught here and re-raised.
  <Returns>
    Boolean.  True if the number of good unique (by keyid) signatures >= the
    role's threshold, False otherwise.
  """
  tuf.formats.SIGNABLE_SCHEMA.check_match(signable)
  tuf.formats.ROLENAME_SCHEMA.check_match(role)
  securesystemslib.formats.NAME_SCHEMA.check_match(repository_name)
  # Retrieve the signature status.  tuf.sig.get_signature_status() raises:
  # tuf.exceptions.UnknownRoleError
  # securesystemslib.exceptions.FormatError.  'threshold' and 'keyids' are also
  # validated.
  status = get_signature_status(signable, role, repository_name, threshold, keyids)
  # Retrieve the role's threshold and the authorized keys of 'status'
  threshold = status['threshold']
  good_sigs = status['good_sigs']
  # Does 'status' have the required threshold of signatures?
  # First check for invalid threshold values before returning result.
  # Note: get_signature_status() is expected to verify that 'threshold' is
  # not None or <= 0.
  if threshold is None or threshold <= 0: #pragma: no cover
    raise securesystemslib.exceptions.Error("Invalid threshold: " + repr(threshold))
  # Counting a *set* of keyids ensures duplicate signatures from the same
  # authorized keyid count only once towards the threshold.
  return len(set(good_sigs)) >= threshold
def may_need_new_keys(signature_status):
  """
  <Purpose>
    Return True iff downloading a new set of keys might tip this
    signature status over to valid.  This is determined by checking
    if either the number of unknown or untrusted keys is > 0.
  <Arguments>
    signature_status:
      The dictionary returned by tuf.sig.get_signature_status().
  <Exceptions>
    securesystemslib.exceptions.FormatError, if 'signature_status does not have
    the correct format.
  <Side Effects>
    None.
  <Returns>
    Boolean.
  """
  # Does 'signature_status' have the correct format?
  # This check will ensure 'signature_status' has the appropriate number
  # of objects and object types, and that all dict keys are properly named.
  # Raise 'securesystemslib.exceptions.FormatError' if the check fails.
  tuf.formats.SIGNATURESTATUS_SCHEMA.check_match(signature_status)
  unknown = signature_status['unknown_sigs']
  untrusted = signature_status['untrusted_sigs']
  # Return an actual bool as documented above; the previous implementation
  # (`len(unknown) or len(untrusted)`) returned an int.  bool is an int
  # subclass, so truthiness-based callers are unaffected.
  return bool(unknown or untrusted)
def generate_rsa_signature(signed, rsakey_dict):
  """
  <Purpose>
    Generate a signature dict for 'signed', presumably to be appended to the
    'signatures' list of a 'signable' dict:
    {'signed': 'signer',
     'signatures': [{'keyid': keyid,
                     'method': 'evp',
                     'sig': sig}]}
    'rsakey_dict' supplies the material from which 'keyid', 'method' and
    'sig' are produced.  The caller should ensure the returned signature is
    not already present in 'signable'.
  <Arguments>
    signed:
      The data to be signed, i.e. what is stored in the 'signed' field of a
      'signable' dict.
    rsakey_dict:
      The RSA key, a 'securesystemslib.formats.RSAKEY_SCHEMA' dictionary.
  <Exceptions>
    securesystemslib.exceptions.FormatError, if 'rsakey_dict' does not have
    the correct format.
    TypeError, if a private key is not defined for 'rsakey_dict'.
  <Side Effects>
    None.
  <Returns>
    A signature dictionary conformant to
    securesystemslib.formats.SIGNATURE_SCHEMA, of the form:
    {'keyid': keyid, 'method': 'evp', 'sig': sig}
  """
  # Canonical JSON encoding makes the signature independent of dict ordering.
  canonical_signed = securesystemslib.formats.encode_canonical(signed).encode('utf-8')
  # Raises securesystemslib.exceptions.FormatError and TypeError.
  return securesystemslib.keys.create_signature(rsakey_dict, canonical_signed)
| ./CrossVul/dataset_final_sorted/CWE-347/py/good_4085_2 |
crossvul-python_data_good_4360_0 | import hashlib
import logging
import os
import warnings
from base64 import b64encode
from json import JSONDecodeError
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union
from typing import cast
from urllib.parse import parse_qs
from urllib.parse import urlparse
from jwkest import BadSyntax
from jwkest import as_bytes
from jwkest import jwe
from jwkest import jws
from jwkest import jwt
from jwkest.jwe import JWE
from requests import ConnectionError
from oic import oauth2
from oic import rndstr
from oic.exception import AccessDenied
from oic.exception import AuthnToOld
from oic.exception import AuthzError
from oic.exception import CommunicationError
from oic.exception import MissingParameter
from oic.exception import ParameterError
from oic.exception import PyoidcError
from oic.exception import RegistrationError
from oic.exception import RequestError
from oic.exception import SubMismatch
from oic.oauth2 import HTTP_ARGS
from oic.oauth2 import authz_error
from oic.oauth2.consumer import ConfigurationError
from oic.oauth2.exception import MissingRequiredAttribute
from oic.oauth2.exception import OtherError
from oic.oauth2.exception import ParseError
from oic.oauth2.message import ErrorResponse
from oic.oauth2.message import Message
from oic.oauth2.message import MessageFactory
from oic.oauth2.message import WrongSigningAlgorithm
from oic.oauth2.util import get_or_post
from oic.oic.message import SCOPE2CLAIMS
from oic.oic.message import AccessTokenResponse
from oic.oic.message import AuthorizationErrorResponse
from oic.oic.message import AuthorizationRequest
from oic.oic.message import AuthorizationResponse
from oic.oic.message import Claims
from oic.oic.message import ClaimsRequest
from oic.oic.message import ClientRegistrationErrorResponse
from oic.oic.message import EndSessionRequest
from oic.oic.message import IdToken
from oic.oic.message import JasonWebToken
from oic.oic.message import OIDCMessageFactory
from oic.oic.message import OpenIDRequest
from oic.oic.message import OpenIDSchema
from oic.oic.message import RefreshSessionRequest
from oic.oic.message import RegistrationRequest
from oic.oic.message import RegistrationResponse
from oic.oic.message import TokenErrorResponse
from oic.oic.message import UserInfoErrorResponse
from oic.oic.message import UserInfoRequest
from oic.utils import time_util
from oic.utils.http_util import Response
from oic.utils.keyio import KeyJar
from oic.utils.sanitize import sanitize
from oic.utils.settings import OicClientSettings
from oic.utils.settings import OicServerSettings
from oic.utils.settings import PyoidcSettings
from oic.utils.webfinger import OIC_ISSUER
from oic.utils.webfinger import WebFinger
__author__ = "rohe0002"
logger = logging.getLogger(__name__)
ENDPOINTS = [
"authorization_endpoint",
"token_endpoint",
"userinfo_endpoint",
"refresh_session_endpoint",
"end_session_endpoint",
"registration_endpoint",
"check_id_endpoint",
]
RESPONSE2ERROR: Dict[str, List] = {
"AuthorizationResponse": [AuthorizationErrorResponse, TokenErrorResponse],
"AccessTokenResponse": [TokenErrorResponse],
"IdToken": [ErrorResponse],
"RegistrationResponse": [ClientRegistrationErrorResponse],
"OpenIDSchema": [UserInfoErrorResponse],
}
REQUEST2ENDPOINT = {
"AuthorizationRequest": "authorization_endpoint",
"OpenIDRequest": "authorization_endpoint",
"AccessTokenRequest": "token_endpoint",
"RefreshAccessTokenRequest": "token_endpoint",
"UserInfoRequest": "userinfo_endpoint",
"CheckSessionRequest": "check_session_endpoint",
"CheckIDRequest": "check_id_endpoint",
"EndSessionRequest": "end_session_endpoint",
"RefreshSessionRequest": "refresh_session_endpoint",
"RegistrationRequest": "registration_endpoint",
"RotateSecret": "registration_endpoint",
# ---
"ResourceRequest": "resource_endpoint",
"TokenIntrospectionRequest": "introspection_endpoint",
"TokenRevocationRequest": "revocation_endpoint",
"ROPCAccessTokenRequest": "token_endpoint",
}
# -----------------------------------------------------------------------------
JWT_BEARER = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
SAML2_BEARER_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:saml2-bearer"
# This should probably be part of the configuration
MAX_AUTHENTICATION_AGE = 86400
DEF_SIGN_ALG = {
"id_token": "RS256",
"openid_request_object": "RS256",
"client_secret_jwt": "HS256",
"private_key_jwt": "RS256",
}
# -----------------------------------------------------------------------------
# Known, ordered ACR (Authentication Context Class Reference) hierarchies.
ACR_LISTS = [["0", "1", "2", "3", "4"]]


def verify_acr_level(req, level):
    """Return *level* if it satisfies the acr claim request *req*.

    :param req: None or a claim spec dict, possibly containing "values"
    :param level: the acr level actually used during authentication
    :raises AccessDenied: if *level* is not acceptable for *req*
    :return: *level* when acceptable
    """
    if req is None:
        return level
    if "values" not in req:  # Required or Optional without value restriction
        return level
    for requested in req["values"]:
        for hierarchy in ACR_LISTS:
            try:
                acceptable = hierarchy.index(requested) <= hierarchy.index(level)
            except ValueError:
                # Either value is unknown to this hierarchy; try the next one.
                continue
            if acceptable:
                return level
    raise AccessDenied("", req)
def deser_id_token(inst, txt=""):
    """Deserialize a JWT string into an IdToken using *inst*'s keyjar.

    :param inst: object carrying a ``keyjar`` attribute
    :param txt: serialized JWT; an empty value yields None
    :return: IdToken instance or None
    """
    if not txt:
        return None
    return IdToken().from_jwt(txt, keyjar=inst.keyjar)
# -----------------------------------------------------------------------------
def make_openid_request(
    arq,
    keys=None,
    userinfo_claims=None,
    idtoken_claims=None,
    request_object_signing_alg=None,
    **kwargs,
):
    """
    Construct the specification of what I want returned.

    The request will be signed.

    :param arq: The Authorization request
    :param keys: Keys to use for signing/encrypting
    :param userinfo_claims: UserInfo claims
    :param idtoken_claims: IdToken claims
    :param request_object_signing_alg: Which signing algorithm to use
    :return: JWT encoded OpenID request
    """
    oir_args = {}
    # Copy over every parameter the OpenIDRequest message knows about.
    for prop in OpenIDRequest.c_param.keys():
        try:
            oir_args[prop] = arq[prop]
        except KeyError:
            pass
    # Inside a request object these are space separated strings, not lists.
    for attr in ["scope", "response_type"]:
        if attr in oir_args:
            oir_args[attr] = " ".join(oir_args[attr])
    c_args = {}
    if userinfo_claims is not None:
        # UserInfoClaims
        c_args["userinfo"] = Claims(**userinfo_claims)
    if idtoken_claims is not None:
        # IdTokenClaims
        c_args["id_token"] = Claims(**idtoken_claims)
    if c_args:
        oir_args["claims"] = ClaimsRequest(**c_args)
    oir = OpenIDRequest(**oir_args)
    return oir.to_jwt(key=keys, algorithm=request_object_signing_alg)
class Token(oauth2.Token):
    """Token container for OIDC; behaviour is inherited unchanged from oauth2.Token."""

    pass
class Grant(oauth2.Grant):
    """Authorization grant that is aware of OIDC responses (ID Tokens)."""

    _authz_resp = AuthorizationResponse
    _acc_resp = AccessTokenResponse
    _token_class = Token

    def add_token(self, resp):
        """Store a token built from *resp* if it carries an access or ID token."""
        token = self._token_class(resp)
        if token.access_token or getattr(token, "id_token", None):
            self.tokens.append(token)
# Maps a client preference (registration parameter) to the provider
# configuration key that advertises support for it.
PREFERENCE2PROVIDER = {
    "request_object_signing_alg": "request_object_signing_alg_values_supported",
    "request_object_encryption_alg": "request_object_encryption_alg_values_supported",
    "request_object_encryption_enc": "request_object_encryption_enc_values_supported",
    "userinfo_signed_response_alg": "userinfo_signing_alg_values_supported",
    "userinfo_encrypted_response_alg": "userinfo_encryption_alg_values_supported",
    "userinfo_encrypted_response_enc": "userinfo_encryption_enc_values_supported",
    "id_token_signed_response_alg": "id_token_signing_alg_values_supported",
    "id_token_encrypted_response_alg": "id_token_encryption_alg_values_supported",
    "id_token_encrypted_response_enc": "id_token_encryption_enc_values_supported",
    "default_acr_values": "acr_values_supported",
    "subject_type": "subject_types_supported",
    "token_endpoint_auth_method": "token_endpoint_auth_methods_supported",
    "token_endpoint_auth_signing_alg": "token_endpoint_auth_signing_alg_values_supported",
    "response_types": "response_types_supported",
    "grant_types": "grant_types_supported",
}
# Inverse mapping: provider configuration key -> client preference name.
PROVIDER2PREFERENCE = dict([(v, k) for k, v in PREFERENCE2PROVIDER.items()])
# Values assumed when the provider configuration does not state them.
PROVIDER_DEFAULT = {
    "token_endpoint_auth_method": "client_secret_basic",
    "id_token_signed_response_alg": "RS256",
}
# Registration parameter name templates per crypto role; '%s' is filled with
# the message type (e.g. "id_token" or "userinfo").  See sign_enc_algs().
PARAMMAP = {
    "sign": "%s_signed_response_alg",
    "alg": "%s_encrypted_response_alg",
    "enc": "%s_encrypted_response_enc",
}
# Response type combination (sorted, space separated) -> required grant types.
rt2gt = {
    "code": ["authorization_code"],
    "id_token": ["implicit"],
    "id_token token": ["implicit"],
    "code id_token": ["authorization_code", "implicit"],
    "code token": ["authorization_code", "implicit"],
    "code id_token token": ["authorization_code", "implicit"],
}


def response_types_to_grant_types(resp_types, **kwargs):
    """Translate response types into the set of grant types they require.

    :param resp_types: iterable of response type strings (e.g. "code id_token")
    :param kwargs: may contain "grant_types" to be included in the result
    :raises ValueError: on an unknown response type combination
    :return: list of grant types (unordered)
    """
    grant_types = set(kwargs.get("grant_types", []))
    for response_type in resp_types:
        normalized = " ".join(sorted(response_type.split(" ")))
        if normalized not in rt2gt:
            raise ValueError("No such response type combination: {}".format(resp_types))
        grant_types.update(rt2gt[normalized])
    return list(grant_types)
def claims_match(value, claimspec):
    """
    Implement matching according to section 5.5.1 of http://openid.net/specs/openid-connect-core-1_0.html.

    The lack of value is not checked here.
    Also the text doesn't prohibit having both 'value' and 'values'.

    :param value: single value or list of values
    :param claimspec: None or dictionary with 'essential', 'value' or 'values'
        as key
    :return: Boolean
    """
    if claimspec is None:
        # No restrictions at all -- anything matches.
        return True
    for key, requested in claimspec.items():
        if key == "value" and value == requested:
            return True
        if key == "values" and value in requested:
            return True
        # "essential" alone does not restrict the value; other keys never match.
    # Nothing matched: a spec consisting solely of "essential" still matches.
    return list(claimspec.keys()) == ["essential"]
class Client(oauth2.Client):
    """OpenID Connect client; extends the plain OAuth2 client with OIDC endpoints and messages."""

    _endpoints = ENDPOINTS
    def __init__(
        self,
        client_id=None,
        client_prefs=None,
        client_authn_method=None,
        keyjar=None,
        verify_ssl=None,
        config=None,
        client_cert=None,
        requests_dir="requests",
        message_factory: Type[MessageFactory] = OIDCMessageFactory,
        settings: PyoidcSettings = None,
    ):
        """
        Initialize the instance.

        :param client_id: The registered client identifier
        :param client_prefs: Preferences matched against the provider
            configuration (see ``match_preferences``)
        :param client_authn_method: Client authentication methods by name
        :param keyjar: KeyJar with the relevant keys
        :param verify_ssl: Deprecated; use ``settings`` instead
        :param config: Extra client configuration
        :param client_cert: Deprecated; use ``settings`` instead
        :param requests_dir: Directory where request object files are written
        :param message_factory: Factory producing request/response classes

        Keyword Args:
            settings
                Instance of :class:`OauthClientSettings` with configuration options.

                Currently used settings are:
                 - verify_ssl
                 - client_cert
                 - timeout
        """
        self.settings = settings or OicClientSettings()
        if verify_ssl is not None:
            warnings.warn(
                "`verify_ssl` is deprecated, please use `settings` instead if you need to set a non-default value.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.settings.verify_ssl = verify_ssl
        if client_cert is not None:
            warnings.warn(
                "`client_cert` is deprecated, please use `settings` instead if you need to set a non-default value.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.settings.client_cert = client_cert
        oauth2.Client.__init__(
            self,
            client_id,
            client_authn_method=client_authn_method,
            keyjar=keyjar,
            config=config,
            message_factory=message_factory,
            settings=self.settings,
        )
        self.file_store = "./file/"
        self.file_uri = "http://localhost/"
        self.base_url = ""
        # OpenID connect specific endpoints
        for endpoint in ENDPOINTS:
            setattr(self, endpoint, "")
        # Stored ID Tokens, keyed by string (presumably state -- confirm at call sites).
        self.id_token: Dict[str, Token] = {}
        self.log = None
        self.request2endpoint = REQUEST2ENDPOINT
        self.response2error = RESPONSE2ERROR
        self.grant_class = Grant
        self.token_class = Token
        self.provider_info = Message()
        self.registration_response: RegistrationResponse = RegistrationResponse()
        self.client_prefs = client_prefs or {}
        # Negotiated behaviour; filled in by match_preferences()/registration.
        self.behaviour: Dict[str, Any] = {}
        self.scope = ["openid"]
        self.wf = WebFinger(OIC_ISSUER)
        self.wf.httpd = self
        self.allow = {}
        self.post_logout_redirect_uris: List[str] = []
        self.registration_expires = 0
        self.registration_access_token = None
        self.id_token_max_age = 0
        # Default key by kid for different key types
        # For instance {'sig': {"RSA":"abc"}}
        self.kid = {"sig": {}, "enc": {}}
        self.requests_dir = requests_dir
    def _get_id_token(self, **kwargs):
        """Locate a raw ID Token (serialized JWT) for the current grant.

        An explicit "id_token" keyword wins; otherwise the grant selected by
        the remaining kwargs is searched for a token whose scope covers the
        requested scope.

        :return: the serialized ID Token, or None when none is available
        """
        try:
            return kwargs["id_token"]
        except KeyError:
            grant = self.get_grant(**kwargs)
            if grant:
                try:
                    _scope = kwargs["scope"]
                except KeyError:
                    _scope = None
                for token in grant.tokens:
                    if token.scope and _scope:
                        flag = True
                        for item in _scope:
                            if item not in token.scope:
                                flag = False
                                break
                        if not flag:
                            # NOTE(review): this breaks out of the *token* loop on
                            # the first scope mismatch instead of trying the next
                            # token -- confirm that is intentional.
                            break
                    if token.id_token:
                        return token.id_token.jwt
        return None
    def request_object_encryption(self, msg, **kwargs):
        """Encrypt a request object JWT if encryption is configured.

        alg/enc are taken from *kwargs* first, then from the registered
        behaviour.  If no alg is configured the message is returned unchanged.

        :param msg: the (signed) request object as a string
        :raises MissingRequiredAttribute: if enc or the key owner ("target")
            is missing while an alg is configured
        :return: the encrypted JWE string, or *msg* unchanged
        """
        try:
            encalg = kwargs["request_object_encryption_alg"]
        except KeyError:
            try:
                encalg = self.behaviour["request_object_encryption_alg"]
            except KeyError:
                # Encryption not configured -- nothing to do.
                return msg
        try:
            encenc = kwargs["request_object_encryption_enc"]
        except KeyError:
            try:
                encenc = self.behaviour["request_object_encryption_enc"]
            except KeyError:
                raise MissingRequiredAttribute(
                    "No request_object_encryption_enc specified"
                )
        _jwe = JWE(msg, alg=encalg, enc=encenc)
        # Pick encryption keys of the key type implied by the algorithm.
        _kty = jwe.alg2keytype(encalg)
        try:
            _kid = kwargs["enc_kid"]
        except KeyError:
            _kid = ""
        if "target" not in kwargs:
            raise MissingRequiredAttribute("No target specified")
        if _kid:
            _keys = self.keyjar.get_encrypt_key(_kty, owner=kwargs["target"], kid=_kid)
            _jwe["kid"] = _kid
        else:
            _keys = self.keyjar.get_encrypt_key(_kty, owner=kwargs["target"])
        return _jwe.encrypt(_keys)
@staticmethod
def construct_redirect_uri(**kwargs):
_filedir = kwargs["local_dir"]
if not os.path.isdir(_filedir):
os.makedirs(_filedir)
_webpath = kwargs["base_path"]
_name = rndstr(10) + ".jwt"
filename = os.path.join(_filedir, _name)
while os.path.exists(filename):
_name = rndstr(10)
filename = os.path.join(_filedir, _name)
_webname = "%s%s" % (_webpath, _name)
return filename, _webname
def filename_from_webname(self, webname):
_filedir = self.requests_dir
if not os.path.isdir(_filedir):
os.makedirs(_filedir)
if webname.startswith(self.base_url):
return webname[len(self.base_url) :]
else:
raise ValueError("Invalid webname, must start with base_url")
    def construct_AuthorizationRequest(
        self, request=None, request_args=None, extra_args=None, **kwargs
    ):
        """Build an AuthorizationRequest, optionally packing it as a request object.

        A nonce is generated when needed but not supplied.  When a request
        parameter is asked for ("request_method"/"request_param") the message
        is serialized to a signed (and possibly encrypted) JWT and carried
        either by value ("request") or by reference ("request_uri").

        :param request: request class to instantiate (factory default if None)
        :param request_args: arguments for the request message
        :param extra_args: extra (non-standard) arguments
        :return: the AuthorizationRequest instance
        """
        if request_args is not None:
            if "nonce" not in request_args:
                _rt = request_args["response_type"]
                # Implicit/hybrid flows require a nonce.
                if "token" in _rt or "id_token" in _rt:
                    request_args["nonce"] = rndstr(32)
        elif "response_type" in kwargs:
            if "token" in kwargs["response_type"]:
                request_args = {"nonce": rndstr(32)}
        else:  # Never wrong to specify a nonce
            request_args = {"nonce": rndstr(32)}
        request_param = kwargs.get("request_param")
        if "request_method" in kwargs:
            # "file" means pass-by-reference, anything else pass-by-value.
            if kwargs["request_method"] == "file":
                request_param = "request_uri"
            else:
                request_param = "request"
            del kwargs["request_method"]
        areq = super().construct_AuthorizationRequest(
            request=request, request_args=request_args, extra_args=extra_args, **kwargs
        )
        if request_param:
            alg = None
            for arg in ["request_object_signing_alg", "algorithm"]:
                try:  # Trumps everything
                    alg = kwargs[arg]
                except KeyError:
                    pass
                else:
                    break
            if not alg:
                # Fall back to the registered behaviour, or unsigned ("none").
                try:
                    alg = self.behaviour["request_object_signing_alg"]
                except KeyError:
                    alg = "none"
            kwargs["request_object_signing_alg"] = alg
            if "keys" not in kwargs and alg and alg != "none":
                _kty = jws.alg2keytype(alg)
                try:
                    _kid = kwargs["sig_kid"]
                except KeyError:
                    _kid = self.kid["sig"].get(_kty, None)
                kwargs["keys"] = self.keyjar.get_signing_key(_kty, kid=_kid)
            _req = make_openid_request(areq, **kwargs)
            # Should the request be encrypted
            _req = self.request_object_encryption(_req, **kwargs)
            if request_param == "request":
                areq["request"] = _req
            else:
                # Pass-by-reference: write the JWT to a file served under a
                # registered request_uri (or a freshly constructed one).
                try:
                    _webname = self.registration_response["request_uris"][0]
                    filename = self.filename_from_webname(_webname)
                except KeyError:
                    filename, _webname = self.construct_redirect_uri(**kwargs)
                with open(filename, mode="w") as fid:
                    fid.write(_req)
                areq["request_uri"] = _webname
        return areq
def construct_UserInfoRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("userinfo_endpoint")
if request_args is None:
request_args = {}
if "access_token" in request_args:
pass
else:
if "scope" not in kwargs:
kwargs["scope"] = "openid"
token = self.get_token(**kwargs)
if token is None:
raise MissingParameter("No valid token available")
request_args["access_token"] = token.access_token
return self.construct_request(request, request_args, extra_args)
def construct_RegistrationRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("registration_endpoint")
return self.construct_request(request, request_args, extra_args)
def construct_RefreshSessionRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("refreshsession_endpoint")
return self.construct_request(request, request_args, extra_args)
def _id_token_based(self, request, request_args=None, extra_args=None, **kwargs):
if request_args is None:
request_args = {}
try:
_prop = kwargs["prop"]
except KeyError:
_prop = "id_token"
if _prop in request_args:
pass
else:
raw_id_token = self._get_id_token(**kwargs)
if raw_id_token is None:
raise MissingParameter("No valid id token available")
request_args[_prop] = raw_id_token
return self.construct_request(request, request_args, extra_args)
def construct_CheckSessionRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("checksession_endpoint")
return self._id_token_based(request, request_args, extra_args, **kwargs)
def construct_CheckIDRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("checkid_endpoint")
# access_token is where the id_token will be placed
return self._id_token_based(
request, request_args, extra_args, prop="access_token", **kwargs
)
def construct_EndSessionRequest(
self, request=None, request_args=None, extra_args=None, **kwargs
):
if request is None:
request = self.message_factory.get_request_type("endsession_endpoint")
if request_args is None:
request_args = {}
if "state" in request_args and "state" not in kwargs:
kwargs["state"] = request_args["state"]
return self._id_token_based(request, request_args, extra_args, **kwargs)
def do_authorization_request(
self,
state="",
body_type="",
method="GET",
request_args=None,
extra_args=None,
http_args=None,
**kwargs,
):
algs = self.sign_enc_algs("id_token")
if "code_challenge" in self.config:
_args, code_verifier = self.add_code_challenge()
request_args.update(_args)
return super().do_authorization_request(
state=state,
body_type=body_type,
method=method,
request_args=request_args,
extra_args=extra_args,
http_args=http_args,
algs=algs,
)
    def do_access_token_request(
        self,
        scope="",
        state="",
        body_type="json",
        method="POST",
        request_args=None,
        extra_args=None,
        http_args=None,
        authn_method="client_secret_basic",
        **kwargs,
    ):
        """Request tokens from the token endpoint and verify the ID Token nonce.

        After the exchange the nonce in a returned ID Token (if any) is checked
        against the nonce recorded for *state* during authorization.

        :raises ParameterError: if the returned nonce does not match
        :return: the parsed access token response
        """
        atr = super().do_access_token_request(
            scope=scope,
            state=state,
            body_type=body_type,
            method=method,
            request_args=request_args,
            extra_args=extra_args,
            http_args=http_args,
            authn_method=authn_method,
            **kwargs,
        )
        try:
            _idt = atr["id_token"]
        except KeyError:
            pass
        else:
            try:
                if self.state2nonce[state] != _idt["nonce"]:
                    raise ParameterError('Someone has messed with "nonce"')
            except KeyError:
                # NOTE(review): a missing state->nonce mapping or a nonce-less
                # ID Token skips the check entirely -- confirm this is intended.
                pass
        return atr
def do_registration_request(
self,
scope="",
state="",
body_type="json",
method="POST",
request_args=None,
extra_args=None,
http_args=None,
):
request = self.message_factory.get_request_type("registration_endpoint")
url, body, ht_args, csi = self.request_info(
request,
method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope,
state=state,
)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
response_cls = self.message_factory.get_response_type("registration_endpoint")
response = self.request_and_return(
url, response_cls, method, body, body_type, state=state, http_args=http_args
)
return response
def do_check_session_request(
self,
scope="",
state="",
body_type="json",
method="GET",
request_args=None,
extra_args=None,
http_args=None,
):
request = self.message_factory.get_request_type("checksession_endpoint")
response_cls = self.message_factory.get_response_type("checksession_endpoint")
url, body, ht_args, csi = self.request_info(
request,
method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope,
state=state,
)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
return self.request_and_return(
url, response_cls, method, body, body_type, state=state, http_args=http_args
)
def do_check_id_request(
self,
scope="",
state="",
body_type="json",
method="GET",
request_args=None,
extra_args=None,
http_args=None,
):
request = self.message_factory.get_request_type("checkid_endpoint")
response_cls = self.message_factory.get_response_type("checkid_endpoint")
url, body, ht_args, csi = self.request_info(
request,
method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope,
state=state,
)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
return self.request_and_return(
url, response_cls, method, body, body_type, state=state, http_args=http_args
)
def do_end_session_request(
self,
scope="",
state="",
body_type="",
method="GET",
request_args=None,
extra_args=None,
http_args=None,
):
request = self.message_factory.get_request_type("endsession_endpoint")
response_cls = self.message_factory.get_response_type("endsession_endpoint")
url, body, ht_args, _ = self.request_info(
request,
method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope,
state=state,
)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
return self.request_and_return(
url, response_cls, method, body, body_type, state=state, http_args=http_args
)
    def user_info_request(self, method="GET", state="", scope="", **kwargs):
        """Prepare (but do not send) a userinfo request.

        Resolves the access token (from kwargs, or the grant for *state*,
        refreshing it when expired) and decides how the token is transported.

        :param method: HTTP method to use
        :param state: state used to locate the grant/token
        :param scope: scope used when picking a token from the grant
        :raises AccessDenied: when no token is found for *state*
        :return: (path, body, method, http_args) tuple
        """
        uir = self.message_factory.get_request_type("userinfo_endpoint")()
        logger.debug("[user_info_request]: kwargs:%s" % (sanitize(kwargs),))
        token: Optional[Token] = None
        if "token" in kwargs:
            if kwargs["token"]:
                # An explicit token is assumed to be a Bearer token.
                uir["access_token"] = kwargs["token"]
                token = Token()
                token.token_type = "Bearer"
                token.access_token = kwargs["token"]
                kwargs["behavior"] = "use_authorization_header"
            else:
                # What to do ? Need a callback
                pass
        elif "access_token" in kwargs and kwargs["access_token"]:
            uir["access_token"] = kwargs["access_token"]
            del kwargs["access_token"]
        elif state:
            token = self.grant[state].get_token(scope)
            if token is None:
                raise AccessDenied("invalid_token")
            if token.is_valid():
                uir["access_token"] = token.access_token
                if (
                    token.token_type
                    and token.token_type.lower() == "bearer"
                    and method == "GET"
                ):
                    kwargs["behavior"] = "use_authorization_header"
            else:
                # raise oauth2.OldAccessToken
                if self.log:
                    self.log.info("do access token refresh")
                try:
                    self.do_access_token_refresh(token=token, state=state)
                    token = cast(Token, self.grant[state].get_token(scope))
                    uir["access_token"] = token.access_token
                except Exception:
                    raise
        uri = self._endpoint("userinfo_endpoint", **kwargs)
        # If access token is a bearer token it might be sent in the
        # authorization header
        # 4 ways of sending the access_token:
        # - POST with token in authorization header
        # - POST with token in message body
        # - GET with token in authorization header
        # - GET with token as query parameter
        if "behavior" in kwargs:
            _behav = kwargs["behavior"]
            _token = uir["access_token"]
            _ttype = ""
            try:
                _ttype = kwargs["token_type"]
            except KeyError:
                if token:
                    try:
                        _ttype = cast(str, token.token_type)
                    except AttributeError:
                        raise MissingParameter("Unspecified token type")
            if "as_query_parameter" == _behav:
                method = "GET"
            elif token:
                # use_authorization_header, token_in_message_body
                if "use_authorization_header" in _behav:
                    token_header = "{type} {token}".format(
                        type=_ttype.capitalize(), token=_token
                    )
                    if "headers" in kwargs:
                        kwargs["headers"].update({"Authorization": token_header})
                    else:
                        kwargs["headers"] = {"Authorization": token_header}
                if "token_in_message_body" not in _behav:
                    # remove the token from the request
                    del uir["access_token"]
        path, body, kwargs = get_or_post(uri, method, uir, **kwargs)
        h_args = dict([(k, v) for k, v in kwargs.items() if k in HTTP_ARGS])
        return path, body, method, h_args
    def do_user_info_request(
        self, method="POST", state="", scope="openid", request="openid", **kwargs
    ):
        """Send a userinfo request and parse/validate the response.

        Handles both JSON and signed-JWT userinfo responses, maps error
        bodies to UserInfoErrorResponse, and checks the "sub" claim against
        the ID Token for *state*.

        :raises PyoidcError: on unexpected status codes / content types
        :raises CommunicationError: on HTTP 405
        :raises SubMismatch: if userinfo "sub" differs from the ID Token's
        :return: the parsed userinfo (or error) message
        """
        kwargs["request"] = request
        path, body, method, h_args = self.user_info_request(
            method, state, scope, **kwargs
        )
        logger.debug(
            "[do_user_info_request] PATH:%s BODY:%s H_ARGS: %s"
            % (sanitize(path), sanitize(body), sanitize(h_args))
        )
        if self.events:
            self.events.store("Request", {"body": body})
            self.events.store("request_url", path)
            self.events.store("request_http_args", h_args)
        try:
            resp = self.http_request(path, method, data=body, **h_args)
        except oauth2.exception.MissingRequiredAttribute:
            raise
        if resp.status_code == 200:
            if "application/json" in resp.headers["content-type"]:
                sformat = "json"
            elif "application/jwt" in resp.headers["content-type"]:
                sformat = "jwt"
            else:
                raise PyoidcError(
                    "ERROR: Unexpected content-type: %s" % resp.headers["content-type"]
                )
        elif resp.status_code == 500:
            raise PyoidcError("ERROR: Something went wrong: %s" % resp.text)
        elif resp.status_code == 405:
            # Method not allowed error
            allowed_methods = [x.strip() for x in resp.headers["allow"].split(",")]
            raise CommunicationError(
                "Server responded with HTTP Error Code 405", "", allowed_methods
            )
        elif 400 <= resp.status_code < 500:
            # the response text might be a OIDC message
            try:
                res = ErrorResponse().from_json(resp.text)
            except Exception:
                raise RequestError(resp.text)
            else:
                self.store_response(res, resp.text)
                return res
        else:
            raise PyoidcError(
                "ERROR: Something went wrong [%s]: %s" % (resp.status_code, resp.text)
            )
        try:
            _schema = kwargs["user_info_schema"]
        except KeyError:
            _schema = OpenIDSchema
        logger.debug("Reponse text: '%s'" % sanitize(resp.text))
        _txt = resp.text
        if sformat == "json":
            res = _schema().from_json(txt=_txt)
        else:
            # Signed-JWT userinfo; "verify" may be disabled by the caller.
            verify = kwargs.get("verify", True)
            res = _schema().from_jwt(
                _txt,
                keyjar=self.keyjar,
                sender=self.provider_info["issuer"],
                verify=verify,
            )
        if "error" in res:  # Error response
            res = UserInfoErrorResponse(**res.to_dict())
        if state:
            # Verify userinfo sub claim against what's returned in the ID Token
            idt = self.grant[state].get_id_token()
            if idt:
                if idt["sub"] != res["sub"]:
                    raise SubMismatch(
                        "Sub identifier not the same in userinfo and Id Token"
                    )
        self.store_response(res, _txt)
        return res
    def get_userinfo_claims(
        self, access_token, endpoint, method="POST", schema_class=OpenIDSchema, **kwargs
    ):
        """Fetch userinfo claims directly from *endpoint* using *access_token*.

        :param access_token: token authorizing the request
        :param endpoint: userinfo endpoint URL
        :param method: HTTP method to use
        :param schema_class: message class used to parse the JSON response
        :raises PyoidcError: on unexpected status codes or content types
        :return: the parsed userinfo message
        """
        uir = UserInfoRequest(access_token=access_token)
        h_args = dict([(k, v) for k, v in kwargs.items() if k in HTTP_ARGS])
        if "authn_method" in kwargs:
            # NOTE(review): unlike the default branch below this call does not
            # pass the request message (uir) -- confirm that
            # init_authentication_method accepts this form.
            http_args = self.init_authentication_method(**kwargs)
        else:
            # If nothing defined this is the default
            http_args = self.init_authentication_method(uir, "bearer_header", **kwargs)
        h_args.update(http_args)
        path, body, kwargs = get_or_post(endpoint, method, uir, **kwargs)
        try:
            resp = self.http_request(path, method, data=body, **h_args)
        except MissingRequiredAttribute:
            raise
        if resp.status_code == 200:
            # FIXME: Could this also encounter application/jwt for encrypted userinfo
            # the do_userinfo_request method already handles it
            if "application/json" not in resp.headers["content-type"]:
                raise PyoidcError(
                    "ERROR: content-type in response unexpected: %s"
                    % resp.headers["content-type"]
                )
        elif resp.status_code == 500:
            raise PyoidcError("ERROR: Something went wrong: %s" % resp.text)
        else:
            raise PyoidcError(
                "ERROR: Something went wrong [%s]: %s" % (resp.status_code, resp.text)
            )
        res = schema_class().from_json(txt=resp.text)
        self.store_response(res, resp.text)
        return res
def unpack_aggregated_claims(self, userinfo):
if userinfo["_claim_sources"]:
for csrc, spec in userinfo["_claim_sources"].items():
if "JWT" in spec:
aggregated_claims = Message().from_jwt(
spec["JWT"].encode("utf-8"), keyjar=self.keyjar, sender=csrc
)
claims = [
value
for value, src in userinfo["_claim_names"].items()
if src == csrc
]
if set(claims) != set(list(aggregated_claims.keys())):
logger.warning(
"Claims from claim source doesn't match what's in "
"the userinfo"
)
for key, vals in aggregated_claims.items():
userinfo[key] = vals
return userinfo
    def fetch_distributed_claims(self, userinfo, callback=None):
        """Fetch distributed claims from the endpoints listed in _claim_sources.

        :param userinfo: userinfo message carrying _claim_sources/_claim_names
        :param callback: optional function (endpoint -> access token) used when
            a claim source spec does not include its own access_token
        :return: *userinfo* with fetched claims merged in and the
            _claim_sources/_claim_names bookkeeping removed
        """
        for csrc, spec in userinfo["_claim_sources"].items():
            if "endpoint" in spec:
                if not spec["endpoint"].startswith("https://"):
                    logger.warning(
                        "Fetching distributed claims from an untrusted source: %s",
                        spec["endpoint"],
                    )
                if "access_token" in spec:
                    # NOTE(review): verify=False disables JWT signature
                    # verification of the fetched claims -- confirm that this
                    # is acceptable for distributed claim sources.
                    _uinfo = self.do_user_info_request(
                        method="GET",
                        token=spec["access_token"],
                        userinfo_endpoint=spec["endpoint"],
                        verify=False,
                    )
                else:
                    if callback:
                        _uinfo = self.do_user_info_request(
                            method="GET",
                            token=callback(spec["endpoint"]),
                            userinfo_endpoint=spec["endpoint"],
                            verify=False,
                        )
                    else:
                        _uinfo = self.do_user_info_request(
                            method="GET",
                            userinfo_endpoint=spec["endpoint"],
                            verify=False,
                        )
                claims = [
                    value
                    for value, src in userinfo["_claim_names"].items()
                    if src == csrc
                ]
                if set(claims) != set(list(_uinfo.keys())):
                    logger.warning(
                        "Claims from claim source doesn't match what's in "
                        "the userinfo"
                    )
                for key, vals in _uinfo.items():
                    userinfo[key] = vals
        # Remove the `_claim_sources` and `_claim_names` from userinfo and better be safe than sorry
        if "_claim_sources" in userinfo:
            del userinfo["_claim_sources"]
        if "_claim_names" in userinfo:
            del userinfo["_claim_names"]
        return userinfo
def verify_alg_support(self, alg, usage, other):
"""
Verify that the algorithm to be used are supported by the other side.
:param alg: The algorithm specification
:param usage: In which context the 'alg' will be used.
The following values are supported:
- userinfo
- id_token
- request_object
- token_endpoint_auth
:param other: The identifier for the other side
:return: True or False
"""
try:
_pcr = self.provider_info
supported = _pcr["%s_algs_supported" % usage]
except KeyError:
try:
supported = getattr(self, "%s_algs_supported" % usage)
except AttributeError:
supported = None
if supported is None:
return True
else:
if alg in supported:
return True
else:
return False
    def match_preferences(self, pcr=None, issuer=None):
        """
        Match the clients preferences against what the provider can do.

        The outcome is recorded in ``self.behaviour``.

        :param pcr: Provider configuration response if available
        :param issuer: The issuer identifier
        :raises ConfigurationError: if a preference cannot be satisfied
        """
        if not pcr:
            pcr = self.provider_info
        regreq = self.message_factory.get_request_type("registration_endpoint")
        for _pref, _prov in PREFERENCE2PROVIDER.items():
            try:
                vals = self.client_prefs[_pref]
            except KeyError:
                continue
            try:
                _pvals = pcr[_prov]
            except KeyError:
                # Provider does not advertise support; use the protocol
                # default or an empty value of the right shape.
                try:
                    self.behaviour[_pref] = PROVIDER_DEFAULT[_pref]
                except KeyError:
                    if isinstance(pcr.c_param[_prov][0], list):
                        self.behaviour[_pref] = []
                    else:
                        self.behaviour[_pref] = None
                continue
            if isinstance(vals, str):
                if vals in _pvals:
                    self.behaviour[_pref] = vals
            else:
                vtyp = regreq.c_param[_pref]
                if isinstance(vtyp[0], list):
                    # List-valued preference: keep every supported value.
                    self.behaviour[_pref] = []
                    for val in vals:
                        if val in _pvals:
                            self.behaviour[_pref].append(val)
                else:
                    # Single-valued preference: first supported value wins.
                    for val in vals:
                        if val in _pvals:
                            self.behaviour[_pref] = val
                            break
            if _pref not in self.behaviour:
                raise ConfigurationError("OP couldn't match preference:%s" % _pref, pcr)
        # Carry over remaining, non-negotiated preferences verbatim.
        for key, val in self.client_prefs.items():
            if key in self.behaviour:
                continue
            try:
                vtyp = regreq.c_param[key]
                if isinstance(vtyp[0], list):
                    pass
                elif isinstance(val, list) and not isinstance(val, str):
                    val = val[0]
            except KeyError:
                pass
            if key not in PREFERENCE2PROVIDER:
                self.behaviour[key] = val
def store_registration_info(self, reginfo):
self.registration_response = reginfo
if "token_endpoint_auth_method" not in self.registration_response:
self.registration_response[
"token_endpoint_auth_method" # nosec
] = "client_secret_basic"
self.client_id = reginfo["client_id"]
try:
self.client_secret = reginfo["client_secret"]
except KeyError: # Not required
pass
else:
try:
self.registration_expires = reginfo["client_secret_expires_at"]
except KeyError:
pass
try:
self.registration_access_token = reginfo["registration_access_token"]
except KeyError:
pass
    def handle_registration_info(self, response):
        """Parse and validate a client-registration HTTP response.

        On success the registration info is stored on the client.  Error
        responses -- including errors hidden inside a 200/201 body -- raise.

        :param response: HTTP response object from the registration endpoint
        :raises RegistrationError: on any error response
        :return: the parsed registration message
        """
        err_msg = "Got error response: {}"
        unk_msg = "Unknown response: {}"
        if response.status_code in [200, 201]:
            resp = self.message_factory.get_response_type(
                "registration_endpoint"
            )().deserialize(response.text, "json")
            # Some implementations sends back a 200 with an error message inside
            try:
                resp.verify()
            except oauth2.message.MissingRequiredAttribute as err:
                logger.error(err)
                raise RegistrationError(err)
            except Exception:
                # Re-parse the body as an error response.
                resp = ErrorResponse().deserialize(response.text, "json")
                if resp.verify():
                    logger.error(err_msg.format(sanitize(resp.to_json())))
                    if self.events:
                        self.events.store("protocol response", resp)
                    raise RegistrationError(resp.to_dict())
                else:  # Something else
                    logger.error(unk_msg.format(sanitize(response.text)))
                    raise RegistrationError(response.text)
            else:
                # got a proper registration response
                self.store_response(resp, response.text)
                self.store_registration_info(resp)
        elif 400 <= response.status_code <= 499:
            try:
                resp = ErrorResponse().deserialize(response.text, "json")
            except JSONDecodeError:
                logger.error(unk_msg.format(sanitize(response.text)))
                raise RegistrationError(response.text)
            if resp.verify():
                logger.error(err_msg.format(sanitize(resp.to_json())))
                if self.events:
                    self.events.store("protocol response", resp)
                raise RegistrationError(resp.to_dict())
            else:  # Something else
                logger.error(unk_msg.format(sanitize(response.text)))
                raise RegistrationError(response.text)
        else:
            raise RegistrationError(response.text)
        return resp
def registration_read(self, url="", registration_access_token=None):
"""
Read the client registration info from the given url.
:raises RegistrationError: If an error happend
:return: RegistrationResponse
"""
if not url:
url = self.registration_response["registration_client_uri"]
if not registration_access_token:
registration_access_token = self.registration_access_token
headers = {"Authorization": "Bearer %s" % registration_access_token}
rsp = self.http_request(url, "GET", headers=headers)
return self.handle_registration_info(rsp)
    def generate_request_uris(self, request_dir):
        """
        Need to generate a path that is unique for the OP combo.

        The sha256 digest of issuer + base_url makes the path unique per
        issuer/client pair.

        :param request_dir: directory path component appended to base_url
        :return: a single request URI string (note: not a list)
        """
        m = hashlib.sha256()
        m.update(as_bytes(self.provider_info["issuer"]))
        m.update(as_bytes(self.base_url))
        return "{}{}/{}".format(self.base_url, request_dir, m.hexdigest())
    def create_registration_request(self, **kwargs):
        """
        Create a registration request.

        Parameters are taken from *kwargs* first, then from the negotiated
        ``self.behaviour``; ``redirect_uris`` must be resolvable.

        :param kwargs: parameters to the registration request
        :raises MissingRequiredAttribute: if no redirect_uris can be found
        :return: the filled-in registration request message
        """
        req = self.message_factory.get_request_type("registration_endpoint")()
        for prop in req.parameters():
            try:
                req[prop] = kwargs[prop]
            except KeyError:
                try:
                    req[prop] = self.behaviour[prop]
                except KeyError:
                    pass
        if "post_logout_redirect_uris" not in req:
            try:
                req["post_logout_redirect_uris"] = self.post_logout_redirect_uris
            except AttributeError:
                pass
        if "redirect_uris" not in req:
            try:
                req["redirect_uris"] = self.redirect_uris
            except AttributeError:
                raise MissingRequiredAttribute("redirect_uris", req)
        try:
            if self.provider_info["require_request_uri_registration"] is True:
                req["request_uris"] = self.generate_request_uris(self.requests_dir)
        except KeyError:
            pass
        if "response_types" in req:
            # Keep grant_types consistent with the requested response_types.
            req["grant_types"] = response_types_to_grant_types(
                req["response_types"], **kwargs
            )
        return req
def register(self, url, registration_token=None, **kwargs):
"""
Register the client at an OP.
:param url: The OPs registration endpoint
:param registration_token: Initial Access Token for registration endpoint
:param kwargs: parameters to the registration request
:return:
"""
req = self.create_registration_request(**kwargs)
logger.debug("[registration_request]: kwargs:%s" % (sanitize(kwargs),))
if self.events:
self.events.store("Protocol request", req)
headers = {"content-type": "application/json"}
if registration_token is not None:
try:
token = jwt.JWT()
token.unpack(registration_token)
except BadSyntax:
# no JWT
registration_token = b64encode(registration_token.encode()).decode()
finally:
headers["Authorization"] = "Bearer " + registration_token
rsp = self.http_request(url, "POST", data=req.to_json(), headers=headers)
return self.handle_registration_info(rsp)
def normalization(self, principal, idtype="mail"):
if idtype == "mail":
(_, domain) = principal.split("@")
subject = "acct:%s" % principal
elif idtype == "url":
p = urlparse(principal)
domain = p.netloc
subject = principal
else:
domain = ""
subject = principal
return subject, domain
    def discover(self, principal, host=None):
        """
        Run provider discovery for *principal*.

        Delegates to the configured discovery helper (``self.wf``) —
        presumably a webfinger client; confirm against the class __init__.
        """
        return self.wf.discovery_query(principal, host=host)
def sign_enc_algs(self, typ):
resp = {}
for key, val in PARAMMAP.items():
try:
resp[key] = self.registration_response[val % typ]
except (TypeError, KeyError):
if key == "sign":
resp[key] = DEF_SIGN_ALG["id_token"]
return resp
    def _verify_id_token(
        self,
        id_token,
        nonce="",
        acr_values=None,
        auth_time=0,
        max_age=0,
        response_type="",
    ):
        """
        Verify IdToken.

        If the JWT alg Header Parameter uses a MAC based algorithm such as
        HS256, HS384, or HS512, the octets of the UTF-8 representation of the
        client_secret corresponding to the client_id contained in the aud
        (audience) Claim are used as the key to validate the signature. For MAC
        based algorithms, the behavior is unspecified if the aud is
        multi-valued or if an azp value is present that is different than the
        aud value.

        :param id_token: The ID Token tp check
        :param nonce: The nonce specified in the authorization request
        :param acr_values: Asked for acr values
        :param auth_time: An auth_time claim
        :param max_age: Max age of authentication
        :param response_type: The response_type of the authorization request;
            anything but ["code"] forbids an unsigned (alg=none) token
        :raises OtherError: on any failed claim check
        :raises WrongSigningAlgorithm: alg "none" outside the code flow
        :raises AuthnToOld: authentication is older than allowed
        """
        # Issuer must be the provider we are talking to.
        if self.provider_info["issuer"] != id_token["iss"]:
            raise OtherError("issuer != iss")
        # We must be in the audience; with multiple audiences azp must name us.
        if self.client_id not in id_token["aud"]:
            raise OtherError("not intended for me")
        if len(id_token["aud"]) > 1:
            if "azp" not in id_token or id_token["azp"] != self.client_id:
                raise OtherError("not intended for me")
        _now = time_util.utc_time_sans_frac()
        # Expiry check.
        if _now > id_token["exp"]:
            raise OtherError("Passed best before date")
        # An unsigned token is only acceptable in the Authorization Code flow.
        if response_type != ["code"] and id_token.jws_header["alg"] == "none":
            raise WrongSigningAlgorithm(
                "none is not allowed outside Authorization Flow."
            )
        # Client-side limit on how old an issued token may be.
        if (
            self.id_token_max_age
            and _now > int(id_token["iat"]) + self.id_token_max_age
        ):
            raise OtherError("I think this ID token is to old")
        # Replay protection: nonce must match the one from the request.
        if nonce and nonce != id_token["nonce"]:
            raise OtherError("nonce mismatch")
        if acr_values and id_token["acr"] not in acr_values:
            raise OtherError("acr mismatch")
        # max_age bounds the age of the original end-user authentication.
        if max_age and _now > int(id_token["auth_time"] + max_age):
            raise AuthnToOld("To old authentication")
        if auth_time:
            if not claims_match(id_token["auth_time"], {"auth_time": auth_time}):
                raise AuthnToOld("To old authentication")
def verify_id_token(self, id_token, authn_req):
kwa = {}
try:
kwa["nonce"] = authn_req["nonce"]
except KeyError:
pass
for param in ["acr_values", "max_age", "response_type"]:
try:
kwa[param] = authn_req[param]
except KeyError:
pass
self._verify_id_token(id_token, **kwa)
class Server(oauth2.Server):
    """OIC Server class."""

    def __init__(
        self,
        verify_ssl: bool = None,
        keyjar: KeyJar = None,
        client_cert: Union[str, Tuple[str, str]] = None,
        timeout: float = None,
        message_factory: Type[MessageFactory] = OIDCMessageFactory,
        settings: PyoidcSettings = None,
    ):
        """Initialize the server."""
        self.settings = settings or OicServerSettings()
        # Legacy keyword arguments override the settings object but emit
        # deprecation warnings; prefer passing `settings`.
        if verify_ssl is not None:
            warnings.warn(
                "`verify_ssl` is deprecated, please use `settings` instead if you need to set a non-default value.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.settings.verify_ssl = verify_ssl
        if client_cert is not None:
            warnings.warn(
                "`client_cert` is deprecated, please use `settings` instead if you need to set a non-default value.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.settings.client_cert = client_cert
        if timeout is not None:
            warnings.warn(
                "`timeout` is deprecated, please use `settings` instead if you need to set a non-default value.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.settings.timeout = timeout
        super().__init__(
            keyjar=keyjar,
            message_factory=message_factory,
            settings=self.settings,
        )

    @staticmethod
    def _parse_urlencoded(url=None, query=None):
        """Return the query parameters of *url* (or of *query*) as a dict."""
        if url:
            parts = urlparse(url)
            scheme, netloc, path, params, query, fragment = parts[:6]
        return parse_qs(query)

    def handle_request_uri(self, request_uri, verify=True, sender=""):
        """
        Handle request URI.

        :param request_uri: URL pointing to where the signed request should be fetched from.
        :param verify: Whether the signature on the request should be verified.
            Don't use anything but the default unless you REALLY know what you're doing
        :param sender: The issuer of the request JWT.
        :return: The parsed request, or an authz_error response on fetch failure
        :raises AuthzError: HTTP error from the URI or an unparsable request object
        """
        # Do a HTTP get
        logger.debug("Get request from request_uri: {}".format(request_uri))
        try:
            http_req = self.http_request(request_uri)
        except ConnectionError:
            logger.error("Connection Error")
            return authz_error("invalid_request_uri")
        if not http_req:
            logger.error("Nothing returned")
            return authz_error("invalid_request_uri")
        elif http_req.status_code >= 400:
            logger.error("HTTP error {}:{}".format(http_req.status_code, http_req.text))
            raise AuthzError("invalid_request")
        # http_req.text is a signed JWT
        try:
            logger.debug("request txt: {}".format(http_req.text))
            req = self.parse_jwt_request(
                txt=http_req.text, verify=verify, sender=sender
            )
        except Exception as err:
            logger.error(
                "{}:{} encountered while parsing fetched request".format(
                    err.__class__, err
                )
            )
            raise AuthzError("invalid_openid_request_object")
        logger.debug("Fetched request: {}".format(req))
        return req

    def parse_authorization_request(
        self, request=AuthorizationRequest, url=None, query=None, keys=None
    ):
        """
        Parse an authorization request, resolving request/request_uri objects.

        Parameters carried inside a `request` JWT or fetched via
        `request_uri` are merged with the plain query parameters (query
        parameters do not overwrite the signed ones), and the combined
        message is verified before being returned.
        """
        if url:
            parts = urlparse(url)
            scheme, netloc, path, params, query, fragment = parts[:6]
        if isinstance(query, dict):
            sformat = "dict"
        else:
            sformat = "urlencoded"
        # First pass without verification: we may still need to merge in the
        # request object before the message can verify.
        _req = self._parse_request(request, query, sformat, verify=False)
        if self.events:
            self.events.store("Request", _req)
        _req_req: Union[Message, Dict[str, Any]] = {}
        try:
            _request = _req["request"]
        except KeyError:
            try:
                _url = _req["request_uri"]
            except KeyError:
                pass
            else:
                _req_req = self.handle_request_uri(
                    _url, verify=False, sender=_req["client_id"]
                )
        else:
            if isinstance(_request, Message):
                _req_req = _request
            else:
                try:
                    _req_req = self.parse_jwt_request(
                        request, txt=_request, verify=False
                    )
                except Exception:
                    _req_req = self._parse_request(
                        request, _request, "urlencoded", verify=False
                    )
                else:  # remove JWT attributes
                    for attr in JasonWebToken.c_param:
                        try:
                            del _req_req[attr]
                        except KeyError:
                            pass
        # handle_request_uri may have returned an error Response; pass it on.
        if isinstance(_req_req, Response):
            return _req_req
        if _req_req:
            if self.events:
                self.events.store("Signed Request", _req_req)
            # Merge: the signed request object wins over query parameters.
            for key, val in _req.items():
                if key in ["request", "request_uri"]:
                    continue
                if key not in _req_req:
                    _req_req[key] = val
            _req = _req_req
        if self.events:
            self.events.store("Combined Request", _req)
        try:
            _req.verify(keyjar=self.keyjar)
        except Exception as err:
            if self.events:
                self.events.store("Exception", err)
            logger.error(err)
            raise
        return _req

    def parse_jwt_request(
        self,
        request=AuthorizationRequest,
        txt="",
        keyjar=None,
        verify=True,
        sender="",
        **kwargs,
    ):
        """Overridden to use OIC Message type."""
        if "keys" in kwargs:
            keyjar = kwargs["keys"]
            warnings.warn(
                "`keys` was renamed to `keyjar`, please update your code.",
                DeprecationWarning,
                stacklevel=2,
            )
        return super().parse_jwt_request(
            request=request, txt=txt, keyjar=keyjar, verify=verify, sender=sender
        )

    def parse_check_session_request(self, url=None, query=None):
        """Parse a check-session request and return its deserialized id_token."""
        param = self._parse_urlencoded(url, query)
        # NOTE(review): `assert` is stripped under python -O; the check then
        # degrades to a KeyError on the next line.
        assert "id_token" in param  # nosec, ignore the rest
        return deser_id_token(self, param["id_token"][0])

    def parse_check_id_request(self, url=None, query=None):
        """Parse a check-id request and return the deserialized access_token."""
        param = self._parse_urlencoded(url, query)
        assert "access_token" in param  # nosec, ignore the rest
        return deser_id_token(self, param["access_token"][0])

    def _parse_request(self, request_cls, data, sformat, client_id=None, verify=True):
        """
        Deserialize *data* into a *request_cls* instance and optionally verify it.

        :param request_cls: Message class to instantiate
        :param data: Serialized request (str or dict, depending on *sformat*)
        :param sformat: One of "json", "jwt", "urlencoded" or "dict"
        :param client_id: Sender whose keys should verify the message;
            defaults to the request's own client_id when present
        :param verify: Whether to run the message's verify()
        :raises ParseError: unknown *sformat*
        """
        if sformat == "json":
            request = request_cls().from_json(data)
        elif sformat == "jwt":
            request = request_cls().from_jwt(data, keyjar=self.keyjar, sender=client_id)
        elif sformat == "urlencoded":
            if "?" in data:
                parts = urlparse(data)
                scheme, netloc, path, params, query, fragment = parts[:6]
            else:
                query = data
            request = request_cls().from_urlencoded(query)
        elif sformat == "dict":
            request = request_cls(**data)
        else:
            raise ParseError(
                "Unknown package format: '{}'".format(sformat), request_cls
            )
        # get the verification keys
        if client_id:
            keys = self.keyjar.verify_keys(client_id)
            sender = client_id
        else:
            try:
                keys = self.keyjar.verify_keys(request["client_id"])
                sender = request["client_id"]
            except KeyError:
                keys = None
                sender = ""
        logger.debug("Found {} verify keys".format(len(keys or "")))
        if verify:
            request.verify(key=keys, keyjar=self.keyjar, sender=sender)
        return request

    def parse_open_id_request(self, data, sformat="urlencoded", client_id=None):
        """Parse an OpenIDRequest from *data*."""
        return self._parse_request(OpenIDRequest, data, sformat, client_id)

    def parse_user_info_request(self, data, sformat="urlencoded"):
        """Parse a UserInfoRequest from *data*."""
        return self._parse_request(UserInfoRequest, data, sformat)

    def parse_userinfo_request(self, data, sformat="urlencoded"):
        """Alias of parse_user_info_request, kept for API compatibility."""
        return self._parse_request(UserInfoRequest, data, sformat)

    def parse_refresh_session_request(self, url=None, query=None):
        """Parse a RefreshSessionRequest from a URL or a raw query string."""
        if url:
            parts = urlparse(url)
            query = parts.query
        return RefreshSessionRequest().from_urlencoded(query)

    def parse_registration_request(self, data, sformat="urlencoded"):
        """Parse a RegistrationRequest from *data*."""
        return self._parse_request(RegistrationRequest, data, sformat)

    def parse_end_session_request(self, query, sformat="urlencoded"):
        """Parse an EndSessionRequest; its id_token is deserialized in place."""
        esr = self._parse_request(EndSessionRequest, query, sformat)
        # if there is a id_token in there it is as a string
        # NOTE(review): the access is unconditional, so a request without an
        # id_token raises KeyError here — confirm that is intended.
        esr["id_token"] = deser_id_token(self, esr["id_token"])
        return esr

    @staticmethod
    def update_claims(session, where, about, old_claims=None):
        """
        Update claims dictionary.

        :param session: Session information
        :param where: Which request ("oidreq" or "authzreq")
        :param about: userinfo or id_token
        :param old_claims: Previously collected claims; never overwritten
        :return: claims or None
        """
        if old_claims is None:
            old_claims = {}
        req = None
        if where == "oidreq":
            try:
                req = OpenIDRequest().deserialize(session[where], "json")
            except KeyError:
                pass
        else:  # where == "authzreq"
            try:
                req = AuthorizationRequest().deserialize(session[where], "json")
            except KeyError:
                pass
        if req:
            logger.debug("%s: %s" % (where, sanitize(req.to_dict())))
            try:
                _claims = req["claims"][about]
                if _claims:
                    # update with old claims, do not overwrite
                    for key, val in old_claims.items():
                        if key not in _claims:
                            _claims[key] = val
                    return _claims
            except KeyError:
                pass
        return old_claims

    def id_token_claims(self, session):
        """
        Pick the IdToken claims from the request.

        :param session: Session information
        :return: The IdToken claims
        """
        itc: Dict[str, str] = {}
        # authzreq claims first, then oidreq claims (which take precedence
        # per update_claims' do-not-overwrite rule).
        itc = self.update_claims(session, "authzreq", "id_token", itc)
        itc = self.update_claims(session, "oidreq", "id_token", itc)
        return itc

    def make_id_token(
        self,
        session,
        loa="2",
        issuer="",
        alg="RS256",
        code=None,
        access_token=None,
        user_info=None,
        auth_time=0,
        exp=None,
        extra_claims=None,
    ):
        """
        Create ID Token.

        :param session: Session information
        :param loa: Level of Assurance/Authentication context
        :param issuer: My identifier
        :param alg: Which signing algorithm to use for the IdToken
        :param code: Access grant
        :param access_token: Access Token
        :param user_info: If user info are to be part of the IdToken
        :param auth_time: Time of the end-user authentication
        :param exp: Lifetime as kwargs for epoch_in_a_while; default 1 day
        :param extra_claims: Additional claims merged into the token
        :return: IDToken instance
        """
        # defaults
        if exp is None:
            inawhile = {"days": 1}
        else:
            inawhile = exp
        # Handle the idtoken_claims
        extra = {}
        itc = self.id_token_claims(session)
        if itc.keys():
            try:
                inawhile = {"seconds": itc["max_age"]}
            except KeyError:
                pass
            for key, val in itc.items():
                if key == "auth_time":
                    extra["auth_time"] = auth_time
                elif key == "acr":
                    extra["acr"] = verify_acr_level(val, loa)
        else:
            if auth_time:
                extra["auth_time"] = auth_time
            if loa:
                extra["acr"] = loa
        if not user_info:
            _args: Dict[str, str] = {}
        else:
            try:
                _args = user_info.to_dict()
            except AttributeError:
                _args = user_info
        # Make sure that there are no name clashes
        for key in ["iss", "sub", "aud", "exp", "acr", "nonce", "auth_time"]:
            try:
                del _args[key]
            except KeyError:
                pass
        # Hash algorithm for at_hash/c_hash follows the signing algorithm size.
        halg = "HS%s" % alg[-3:]
        if extra_claims is not None:
            _args.update(extra_claims)
        if code:
            _args["c_hash"] = jws.left_hash(code.encode("utf-8"), halg)
        if access_token:
            _args["at_hash"] = jws.left_hash(access_token.encode("utf-8"), halg)
        idt = IdToken(
            iss=issuer,
            sub=session["sub"],
            aud=session["client_id"],
            exp=time_util.epoch_in_a_while(**inawhile),
            acr=loa,
            iat=time_util.utc_time_sans_frac(),
            **_args,
        )
        for key, val in extra.items():
            idt[key] = val
        if "nonce" in session:
            idt["nonce"] = session["nonce"]
        return idt
def scope2claims(scopes, extra_scope_dict=None):
    """
    Map scope values to the claims they imply.

    :param scopes: Iterable of scope names
    :param extra_scope_dict: Optional extra scope-to-claims mappings that
        extend (and may override) the standard SCOPE2CLAIMS table
    :return: Dict with one ``claim -> None`` entry per implied claim
    """
    # Construct the scope translation map
    trans_map: Dict[str, Any] = SCOPE2CLAIMS.copy()
    if extra_scope_dict is not None:
        trans_map.update(extra_scope_dict)
    res: Dict[str, None] = {}
    for scope in scopes:
        try:
            # fromkeys gives every claim name a None value.
            res.update(dict.fromkeys(trans_map[scope]))
        except KeyError:
            # Unknown scopes are silently ignored.
            continue
    return res
| ./CrossVul/dataset_final_sorted/CWE-347/py/good_4360_0 |
crossvul-python_data_good_4360_2 | import inspect
import json
import logging
import sys
import time
import warnings
from typing import Dict
from typing import List
from urllib.parse import urlencode
from urllib.parse import urlparse
from jwkest import jws
from jwkest.jwe import JWEException
from jwkest.jwe import factory as JWE_factory
from jwkest.jwt import JWT
from oic.exception import InvalidRequest
from oic.exception import IssuerMismatch
from oic.exception import MessageException
from oic.exception import NotForMe
from oic.exception import PyoidcError
from oic.oauth2 import message
from oic.oauth2.exception import VerificationError
from oic.oauth2.message import OPTIONAL_LIST_OF_SP_SEP_STRINGS
from oic.oauth2.message import OPTIONAL_LIST_OF_STRINGS
from oic.oauth2.message import REQUIRED_LIST_OF_SP_SEP_STRINGS
from oic.oauth2.message import REQUIRED_LIST_OF_STRINGS
from oic.oauth2.message import SINGLE_OPTIONAL_INT
from oic.oauth2.message import SINGLE_OPTIONAL_JSON
from oic.oauth2.message import SINGLE_OPTIONAL_STRING
from oic.oauth2.message import SINGLE_REQUIRED_STRING
from oic.oauth2.message import Message
from oic.oauth2.message import MessageFactory
from oic.oauth2.message import MessageTuple
from oic.oauth2.message import MissingRequiredAttribute
from oic.oauth2.message import MissingRequiredValue
from oic.oauth2.message import NotAllowedValue
from oic.oauth2.message import ParamDefinition
from oic.oauth2.message import SchemeError
from oic.utils import time_util
from oic.utils.time_util import utc_time_sans_frac
__author__ = "rohe0002"

# Module-level logger for the OIC message classes.
logger = logging.getLogger(__name__)

# How long (in seconds) issued nonces are retained: 4 hours.
NONCE_STORAGE_TIME = 4 * 3600
class AtHashError(VerificationError):
    """Raised when the at_hash claim does not match the access token."""

    pass
class CHashError(VerificationError):
    """Raised when the c_hash claim does not match the authorization code."""

    pass
class EXPError(VerificationError):
    """Raised for an invalid exp (expiry) claim."""

    pass
class IATError(VerificationError):
    """Raised for an invalid iat (issued-at) claim."""

    pass
def json_ser(val, sformat=None, lev=0):
    """Serialize *val* as a JSON string (*sformat* and *lev* are ignored)."""
    serialized = json.dumps(val)
    return serialized
def json_deser(val, sformat=None, lev=0):
    """Parse the JSON string *val* (*sformat* and *lev* are ignored)."""
    parsed = json.loads(val)
    return parsed
def json_conv(val, sformat=None, lev=0):
    """
    Replace None/True/False dict values with "none"/"true"/"false", in place.

    Identity (``is``) checks are deliberate: the ints 1 and 0 must NOT be
    treated as True/False. Non-dict input is returned untouched.
    """
    if isinstance(val, dict):
        for key in val:
            current = val[key]
            if current is None:
                val[key] = "none"
            elif current is True:
                val[key] = "true"
            elif current is False:
                val[key] = "false"
    return val
def json_rest(val, sformat=None, lev=0):
    """
    Inverse of json_conv: restore "none"/"true"/"false" dict values, in place.

    Equality checks (not a lookup table) are used on purpose: dict values
    may be unhashable (e.g. lists). Non-dict input is returned untouched.
    """
    if isinstance(val, dict):
        for key in val:
            current = val[key]
            if current == "none":
                val[key] = None
            elif current == "true":
                val[key] = True
            elif current == "false":
                val[key] = False
    return val
# value type, required, serializer, deserializer, null value allowed
SINGLE_OPTIONAL_BOOLEAN = ParamDefinition(bool, False, None, None, False)
# "WN" = with null: per the flag above, an explicit null value is allowed.
SINGLE_OPTIONAL_JSON_WN = ParamDefinition(dict, False, json_ser, json_deser, True)
# JSON value whose None/True/False entries are converted to/from strings.
SINGLE_OPTIONAL_JSON_CONV = ParamDefinition(dict, False, json_conv, json_rest, True)
SINGLE_REQUIRED_INT = ParamDefinition(int, True, None, None, False)
def idtoken_deser(val, sformat="urlencoded"):
    """Deserialize an IdToken; *sformat* is ignored since the wire format is always a JWT."""
    # id_token are always serialized as a JWT
    return IdToken().deserialize(val, "jwt")
def address_deser(val, sformat="urlencoded"):
    """
    Deserialize an AddressClaim.

    dict/JSON input is normalised to a JSON string before deserialization;
    anything else is handed to the claim's own deserializer verbatim.
    """
    if sformat in ["dict", "json"]:
        if not isinstance(val, str):
            val = json.dumps(val)
        sformat = "json"
    # The former `elif sformat == "dict"` branch was unreachable: "dict" is
    # already handled (and rewritten to "json") by the branch above.
    return AddressClaim().deserialize(val, sformat)
def claims_deser(val, sformat="urlencoded"):
    """Deserialize a Claims message; dict/JSON input is normalised to a JSON string."""
    if sformat in ("dict", "json"):
        sformat = "json"
        if not isinstance(val, str):
            val = json.dumps(val)
    return Claims().deserialize(val, sformat)
def message_deser(val, sformat="urlencoded"):
    """Deserialize a generic Message; dict/JSON input is normalised to a JSON string."""
    if sformat in ("dict", "json"):
        sformat = "json"
        if not isinstance(val, str):
            val = json.dumps(val)
    return Message().deserialize(val, sformat)
def msg_ser(inst, sformat, lev=0):
    """
    Serialize *inst* according to *sformat*.

    Message instances use their own ``serialize``; other values pass
    through unchanged, except that the "dict" format only accepts
    Message, dict or str (str only for ID Tokens).

    :raises MessageException: "dict" format given an unsupported type
    :raises PyoidcError: unknown *sformat*
    """
    if sformat in ("urlencoded", "json"):
        if isinstance(inst, Message):
            return inst.serialize(sformat, lev)
        return inst
    if sformat == "dict":
        if isinstance(inst, Message):
            return inst.serialize(sformat, lev)
        if isinstance(inst, (dict, str)):  # str iff ID Token
            return inst
        raise MessageException("Wrong type: %s" % type(inst))
    raise PyoidcError("Unknown sformat", inst)
def msg_ser_json(inst, sformat="json", lev=0):
    """
    Serialize *inst*, defaulting to JSON at the top level.

    Nested levels (lev > 0) are serialized as plain dicts; any other
    *sformat* is coerced to "json".

    :raises MessageException: "dict" format given an unsupported type
    """
    # sformat = "json" always except when dict
    if lev:
        sformat = "dict"
    if sformat == "dict":
        if isinstance(inst, Message):
            return inst.serialize(sformat, lev)
        if isinstance(inst, dict):
            return inst
        raise MessageException("Wrong type: %s" % type(inst))
    if isinstance(inst, Message):
        return inst.serialize("json", lev)
    return inst
def msg_list_ser(insts, sformat, lev=0):
    """Serialize every message in *insts* via msg_ser and return the list."""
    serialized = []
    for inst in insts:
        serialized.append(msg_ser(inst, sformat, lev))
    return serialized
def claims_ser(val, sformat="urlencoded", lev=0):
    """
    Serialize a claims value (Message, dict, str or a list of those).

    :raises MessageException: "dict" format given a non-dict item
    :raises PyoidcError: unknown *sformat*
    """
    # everything in c_extension
    if isinstance(val, str):
        item = val
    elif isinstance(val, list):
        # NOTE(review): only the FIRST list element is serialized; the rest
        # are silently dropped — confirm callers never pass more than one.
        item = val[0]
    else:
        item = val
    if isinstance(item, Message):
        return item.serialize(method=sformat, lev=lev + 1)
    if sformat == "urlencoded":
        assert isinstance(  # nosec
            item, dict
        )  # We cannot urlencode anything else than Mapping
        res = urlencode(item)
    elif sformat == "json":
        # At nested levels the dict is returned as-is; only the top level
        # is dumped to a JSON string.
        if lev:
            res = item
        else:
            res = json.dumps(item)
    elif sformat == "dict":
        if isinstance(item, dict):
            res = item
        else:
            raise MessageException("Wrong type: %s" % type(item))
    else:
        raise PyoidcError("Unknown sformat: %s" % sformat, val)
    return res
def registration_request_deser(val, sformat="urlencoded"):
    """Deserialize a RegistrationRequest; dict/JSON input is normalised to a JSON string."""
    if sformat in ("dict", "json"):
        sformat = "json"
        if not isinstance(val, str):
            val = json.dumps(val)
    return RegistrationRequest().deserialize(val, sformat)
def claims_request_deser(val, sformat="json"):
    """Deserialize a ClaimsRequest; urlencoded input is treated as JSON."""
    # never 'urlencoded'
    if sformat == "urlencoded":
        sformat = "json"
    if sformat in ("dict", "json"):
        sformat = "json"
        if not isinstance(val, str):
            val = json.dumps(val)
    return ClaimsRequest().deserialize(val, sformat)
# Shorthand parameter definitions used by the c_param tables below.
OPTIONAL_ADDRESS = ParamDefinition(Message, False, msg_ser, address_deser, False)
OPTIONAL_LOGICAL = ParamDefinition(bool, False, None, None, False)
OPTIONAL_MULTIPLE_Claims = ParamDefinition(
    Message, False, claims_ser, claims_deser, False
)
# ID Tokens stay serialized as JWT strings; no deserializer is attached here.
SINGLE_OPTIONAL_IDTOKEN = ParamDefinition(str, False, msg_ser, None, False)
SINGLE_OPTIONAL_REGISTRATION_REQUEST = ParamDefinition(
    Message, False, msg_ser, registration_request_deser, False
)
# Claims requests are always JSON, hence the JSON-only serializer.
SINGLE_OPTIONAL_CLAIMSREQ = ParamDefinition(
    Message, False, msg_ser_json, claims_request_deser, False
)
OPTIONAL_MESSAGE = ParamDefinition(Message, False, msg_ser, message_deser, False)
REQUIRED_MESSAGE = ParamDefinition(Message, True, msg_ser, message_deser, False)
# ----------------------------------------------------------------------------
# Characters allowed in a scope value: %x21 / %x23-5B / %x5D-7E (RFC 6749,
# Appendix A.4) — i.e. printable ASCII except '"' (%x22) and '\' (%x5C).
SCOPE_CHARSET = []
for char in ["\x21", ("\x23", "\x5b"), ("\x5d", "\x7E")]:
    if isinstance(char, tuple):
        c = char[0]
        while c <= char[1]:
            SCOPE_CHARSET.append(c)
            c = chr(ord(c) + 1)
    else:
        # Bug fix: this used to append the builtin `set` instead of the
        # character itself, so "!" (%x21) was wrongly rejected.
        SCOPE_CHARSET.append(char)
def check_char_set(string, allowed):
    """Raise NotAllowedValue if *string* holds a character outside *allowed*."""
    for ch in string:
        if ch not in allowed:
            raise NotAllowedValue("'%c' not in the allowed character set" % ch)
TOKEN_VERIFY_ARGS = ["key", "keyjar", "algs", "sender"]
def verify_id_token(instance, check_hash=False, **kwargs):
    """
    Verify the ``id_token`` member of *instance*.

    Decrypts the token if it is a JWE, checks (before signature
    verification) that the issuer is known to the supplied keyjar,
    verifies the signature and the IdToken claims and, when *check_hash*
    is set, the at_hash/c_hash values against the sibling
    access_token/code entries.

    :param instance: Message containing an ``id_token`` entry (JWT string)
    :param check_hash: Also validate at_hash / c_hash
    :param kwargs: Verification arguments; entries listed in
        TOKEN_VERIFY_ARGS are forwarded to ``IdToken.from_jwt``
    :raises VerificationError: decryption or claim verification failed
    :raises MissingRequiredAttribute: missing iss / at_hash / c_hash
    :raises AtHashError: at_hash does not match the access token
    :raises CHashError: c_hash does not match the code
    :return: The verified IdToken instance
    """
    # Try to decode the JWT, checks the signature
    args = {}
    for arg in TOKEN_VERIFY_ARGS:
        try:
            args[arg] = kwargs[arg]
        except KeyError:
            pass
    _jws = str(instance["id_token"])
    # It can be encrypted, so try to decrypt first
    _jwe = JWE_factory(_jws)
    if _jwe is not None:
        try:
            _jws = _jwe.decrypt(keys=kwargs["keyjar"].get_decrypt_key())
        except JWEException as err:
            raise VerificationError("Could not decrypt id_token", err)
    # Peek at the (not yet verified) payload so the issuer can be checked
    # against the keyjar before signature verification is attempted.
    _packer = JWT()
    _body = _packer.unpack(_jws).payload()
    if "keyjar" in kwargs:
        try:
            if _body["iss"] not in kwargs["keyjar"]:
                raise ValueError("Unknown issuer")
        except KeyError:
            raise MissingRequiredAttribute("iss")
    if _jwe is not None:
        # Use the original encrypted token to set correct headers
        idt = IdToken().from_jwt(str(instance["id_token"]), **args)
    else:
        idt = IdToken().from_jwt(_jws, **args)
    if not idt.verify(**kwargs):
        raise VerificationError("Could not verify id_token", idt)
    if check_hash:
        _alg = idt.jws_header["alg"]
        if _alg != "none":
            # left_hash algorithm follows the signature algorithm's bit size.
            hfunc = "HS" + _alg[-3:]
        else:
            # This is allowed only for `code` and it needs to be checked by a Client
            hfunc = None
        if "access_token" in instance and hfunc is not None:
            if "at_hash" not in idt:
                raise MissingRequiredAttribute("Missing at_hash property", idt)
            if idt["at_hash"] != jws.left_hash(instance["access_token"], hfunc):
                raise AtHashError("Failed to verify access_token hash", idt)
        if "code" in instance and hfunc is not None:
            if "c_hash" not in idt:
                raise MissingRequiredAttribute("Missing c_hash property", idt)
            if idt["c_hash"] != jws.left_hash(instance["code"], hfunc):
                raise CHashError("Failed to verify code hash", idt)
    return idt
# -----------------------------------------------------------------------------
class RefreshAccessTokenRequest(message.RefreshAccessTokenRequest):
    """OIDC refresh token request; identical to the OAuth2 message."""

    pass
class TokenErrorResponse(message.TokenErrorResponse):
    """OIDC token error response; identical to the OAuth2 message."""

    pass
class AccessTokenResponse(message.AccessTokenResponse):
    """OAuth2 access token response extended with an optional id_token."""

    c_param = message.AccessTokenResponse.c_param.copy()
    c_param.update({"id_token": SINGLE_OPTIONAL_STRING})

    def verify(self, **kwargs):
        """Verify the response; on success any id_token JWT is replaced by the checked IdToken."""
        super().verify(**kwargs)
        if "id_token" in self:
            # replace the JWT with the verified IdToken instance
            self["id_token"] = verify_id_token(self, **kwargs)
        return True
class UserInfoRequest(Message):
    """Request to the userinfo endpoint; the token may also come via the Authorization header."""

    c_param = {"access_token": SINGLE_OPTIONAL_STRING}
class AuthorizationResponse(message.AuthorizationResponse, message.AccessTokenResponse):
    """
    OIDC authorization response.

    Combines the OAuth2 authorization and token responses, since hybrid
    and implicit flows return tokens directly from the authorization
    endpoint.
    """

    c_param = message.AuthorizationResponse.c_param.copy()
    c_param.update(message.AccessTokenResponse.c_param)
    c_param.update(
        {
            "code": SINGLE_OPTIONAL_STRING,
            "access_token": SINGLE_OPTIONAL_STRING,
            "token_type": SINGLE_OPTIONAL_STRING,
            "id_token": SINGLE_OPTIONAL_IDTOKEN,
        }
    )

    def verify(self, **kwargs):
        """Verify the response; an id_token JWT is replaced by the checked IdToken (hashes included)."""
        super().verify(**kwargs)
        if "aud" in self:
            if "client_id" in kwargs:
                # check that it's for me
                if kwargs["client_id"] not in self["aud"]:
                    return False
        if "id_token" in self:
            # at_hash/c_hash must match the sibling token/code values.
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)
        if "access_token" in self:
            if "token_type" not in self:
                raise MissingRequiredValue("Missing token_type parameter", self)
        return True
class AuthorizationErrorResponse(message.AuthorizationErrorResponse):
    """OAuth2 authorization error response extended with the OIDC Core error codes."""

    c_allowed_values = message.AuthorizationErrorResponse.c_allowed_values.copy()
    c_allowed_values["error"].extend(
        [
            "interaction_required",
            "login_required",
            "session_selection_required",
            "consent_required",
            "invalid_request_uri",
            "invalid_request_object",
            "registration_not_supported",
            "request_not_supported",
            "request_uri_not_supported",
        ]
    )
class AuthorizationRequest(message.AuthorizationRequest):
    """OIDC authentication request; extends the OAuth2 authorization request."""

    c_param = message.AuthorizationRequest.c_param.copy()
    c_param.update(
        {
            "scope": REQUIRED_LIST_OF_SP_SEP_STRINGS,
            "redirect_uri": SINGLE_REQUIRED_STRING,
            "nonce": SINGLE_OPTIONAL_STRING,
            "display": SINGLE_OPTIONAL_STRING,
            "prompt": OPTIONAL_LIST_OF_STRINGS,
            "max_age": SINGLE_OPTIONAL_INT,
            "ui_locales": OPTIONAL_LIST_OF_SP_SEP_STRINGS,
            "claims_locales": OPTIONAL_LIST_OF_SP_SEP_STRINGS,
            "id_token_hint": SINGLE_OPTIONAL_STRING,
            "login_hint": SINGLE_OPTIONAL_STRING,
            "acr_values": OPTIONAL_LIST_OF_SP_SEP_STRINGS,
            "claims": SINGLE_OPTIONAL_CLAIMSREQ,
            "registration": SINGLE_OPTIONAL_JSON,
            "request": SINGLE_OPTIONAL_STRING,
            "request_uri": SINGLE_OPTIONAL_STRING,
            "response_mode": SINGLE_OPTIONAL_STRING,
        }
    )
    c_allowed_values = message.AuthorizationRequest.c_allowed_values.copy()
    c_allowed_values.update(
        {
            "display": ["page", "popup", "touch", "wap"],
            "prompt": ["none", "login", "consent", "select_account"],
        }
    )

    def verify(self, **kwargs):
        """
        Check that the request is valid.

        Authorization Request parameters that are OPTIONAL in the OAuth 2.0
        specification MAY be included in the OpenID Request Object without also
        passing them as OAuth 2.0 Authorization Request parameters, with one
        exception: The scope parameter MUST always be present in OAuth 2.0
        Authorization Request parameters.
        All parameter values that are present both in the OAuth 2.0
        Authorization Request and in the OpenID Request Object MUST exactly match.
        """
        super().verify(**kwargs)
        args = {}
        for arg in ["key", "keyjar", "opponent_id", "sender"]:
            try:
                args[arg] = kwargs[arg]
            except KeyError:
                pass
        if "opponent_id" not in kwargs:
            args["opponent_id"] = self["client_id"]
        if "request" in self:
            if isinstance(self["request"], str):
                # Try to decode the JWT, checks the signature
                oidr = OpenIDRequest().from_jwt(str(self["request"]), **args)
                # verify that nothing is change in the original message
                for key, val in oidr.items():
                    if key in self and self[key] != val:
                        raise AssertionError()
                # replace the JWT with the parsed and verified instance
                self["request"] = oidr
        if "id_token_hint" in self:
            if isinstance(self["id_token_hint"], str):
                # Parse (and signature-check) the hint into an IdToken instance.
                idt = IdToken().from_jwt(str(self["id_token_hint"]), **args)
                self["id_token_hint"] = idt
        if "response_type" not in self:
            raise MissingRequiredAttribute("response_type missing", self)
        _rt = self["response_type"]
        # Implicit/hybrid flows need a nonce for replay protection.
        if "token" in _rt or "id_token" in _rt:
            if "nonce" not in self:
                raise MissingRequiredAttribute("Nonce missing", self)
        if "openid" not in self.get("scope", []):
            raise MissingRequiredValue("openid not in scope", self)
        # offline_access requires explicit user consent.
        if "offline_access" in self.get("scope", []):
            if "prompt" not in self or "consent" not in self["prompt"]:
                raise MissingRequiredValue("consent in prompt", self)
        if "prompt" in self:
            if "none" in self["prompt"] and len(self["prompt"]) > 1:
                raise InvalidRequest("prompt none combined with other value", self)
        return True
class AccessTokenRequest(message.AccessTokenRequest):
    """Token endpoint request; adds the JWT client assertion authentication parameters."""

    c_param = message.AccessTokenRequest.c_param.copy()
    c_param.update(
        {
            "client_assertion_type": SINGLE_OPTIONAL_STRING,
            "client_assertion": SINGLE_OPTIONAL_STRING,
        }
    )
    c_default = {"grant_type": "authorization_code"}
    c_allowed_values = {
        "client_assertion_type": [
            "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
        ]
    }
class AddressClaim(Message):
    """The structured `address` claim of the userinfo response."""

    c_param = {
        "formatted": SINGLE_OPTIONAL_STRING,
        "street_address": SINGLE_OPTIONAL_STRING,
        "locality": SINGLE_OPTIONAL_STRING,
        "region": SINGLE_OPTIONAL_STRING,
        "postal_code": SINGLE_OPTIONAL_STRING,
        "country": SINGLE_OPTIONAL_STRING,
    }
class OpenIDSchema(Message):
    """The standard OIDC userinfo claim set."""

    c_param = {
        "sub": SINGLE_REQUIRED_STRING,
        "name": SINGLE_OPTIONAL_STRING,
        "given_name": SINGLE_OPTIONAL_STRING,
        "family_name": SINGLE_OPTIONAL_STRING,
        "middle_name": SINGLE_OPTIONAL_STRING,
        "nickname": SINGLE_OPTIONAL_STRING,
        "preferred_username": SINGLE_OPTIONAL_STRING,
        "profile": SINGLE_OPTIONAL_STRING,
        "picture": SINGLE_OPTIONAL_STRING,
        "website": SINGLE_OPTIONAL_STRING,
        "email": SINGLE_OPTIONAL_STRING,
        "email_verified": SINGLE_OPTIONAL_BOOLEAN,
        "gender": SINGLE_OPTIONAL_STRING,
        "birthdate": SINGLE_OPTIONAL_STRING,
        "zoneinfo": SINGLE_OPTIONAL_STRING,
        "locale": SINGLE_OPTIONAL_STRING,
        "phone_number": SINGLE_OPTIONAL_STRING,
        "phone_number_verified": SINGLE_OPTIONAL_BOOLEAN,
        "address": OPTIONAL_ADDRESS,
        "updated_at": SINGLE_OPTIONAL_INT,
        "_claim_names": OPTIONAL_MESSAGE,
        "_claim_sources": OPTIONAL_MESSAGE,
    }

    def verify(self, **kwargs):
        """Verify the claim set; birthdate must be YYYY-MM-DD, YYYY or 0000-MM-DD, and no claim may be null."""
        super().verify(**kwargs)
        if "birthdate" in self:
            # Either YYYY-MM-DD or just YYYY or 0000-MM-DD
            try:
                time.strptime(self["birthdate"], "%Y-%m-%d")
            except ValueError:
                try:
                    time.strptime(self["birthdate"], "%Y")
                except ValueError:
                    try:
                        time.strptime(self["birthdate"], "0000-%m-%d")
                    except ValueError:
                        raise VerificationError("Birthdate format error", self)
        # A null value for any claim marks the message as invalid.
        if any(val is None for val in self.values()):
            return False
        return True
class RegistrationRequest(Message):
    """Dynamic client registration request (OIDC Registration 1.0 metadata)."""

    c_param = {
        "redirect_uris": REQUIRED_LIST_OF_STRINGS,
        "response_types": OPTIONAL_LIST_OF_STRINGS,
        "grant_types": OPTIONAL_LIST_OF_STRINGS,
        "application_type": SINGLE_OPTIONAL_STRING,
        "contacts": OPTIONAL_LIST_OF_STRINGS,
        "client_name": SINGLE_OPTIONAL_STRING,
        "logo_uri": SINGLE_OPTIONAL_STRING,
        "client_uri": SINGLE_OPTIONAL_STRING,
        "policy_uri": SINGLE_OPTIONAL_STRING,
        "tos_uri": SINGLE_OPTIONAL_STRING,
        "jwks": SINGLE_OPTIONAL_STRING,
        "jwks_uri": SINGLE_OPTIONAL_STRING,
        "sector_identifier_uri": SINGLE_OPTIONAL_STRING,
        "subject_type": SINGLE_OPTIONAL_STRING,
        "id_token_signed_response_alg": SINGLE_OPTIONAL_STRING,
        "id_token_encrypted_response_alg": SINGLE_OPTIONAL_STRING,
        "id_token_encrypted_response_enc": SINGLE_OPTIONAL_STRING,
        "userinfo_signed_response_alg": SINGLE_OPTIONAL_STRING,
        "userinfo_encrypted_response_alg": SINGLE_OPTIONAL_STRING,
        "userinfo_encrypted_response_enc": SINGLE_OPTIONAL_STRING,
        "request_object_signing_alg": SINGLE_OPTIONAL_STRING,
        "request_object_encryption_alg": SINGLE_OPTIONAL_STRING,
        "request_object_encryption_enc": SINGLE_OPTIONAL_STRING,
        "token_endpoint_auth_method": SINGLE_OPTIONAL_STRING,
        "token_endpoint_auth_signing_alg": SINGLE_OPTIONAL_STRING,
        "default_max_age": SINGLE_OPTIONAL_INT,
        "require_auth_time": OPTIONAL_LOGICAL,
        "default_acr_values": OPTIONAL_LIST_OF_STRINGS,
        "initiate_login_uri": SINGLE_OPTIONAL_STRING,
        "request_uris": OPTIONAL_LIST_OF_STRINGS,
        "post_logout_redirect_uris": OPTIONAL_LIST_OF_STRINGS,
        "frontchannel_logout_uri": SINGLE_OPTIONAL_STRING,
        "frontchannel_logout_session_required": OPTIONAL_LOGICAL,
        "backchannel_logout_uri": SINGLE_OPTIONAL_STRING,
        "backchannel_logout_session_required": OPTIONAL_LOGICAL,
    }
    c_default = {"application_type": "web", "response_types": ["code"]}
    c_allowed_values = {
        "application_type": ["native", "web"],
        "subject_type": ["public", "pairwise"],
    }

    def verify(self, **kwargs):
        """
        Verify the registration metadata.

        Enforces https for initiate_login_uri, pairs up the *_alg / *_enc
        encryption parameters (defaulting the enc part to A128CBC-HS256),
        and forbids token_endpoint_auth_signing_alg "none".

        :raises AssertionError: on any of the checks above failing
        """
        super().verify(**kwargs)
        if "initiate_login_uri" in self and not self["initiate_login_uri"].startswith(
            "https:"
        ):
            raise AssertionError()
        for param in [
            "request_object_encryption",
            "id_token_encrypted_response",
            "userinfo_encrypted_response",
        ]:
            alg_param = "%s_alg" % param
            enc_param = "%s_enc" % param
            if alg_param in self:
                if enc_param not in self:
                    # Spec default when only the alg half is given.
                    self[enc_param] = "A128CBC-HS256"
            # both or none
            if enc_param in self and alg_param not in self:
                raise AssertionError()
        if (
            "token_endpoint_auth_signing_alg" in self
            and self["token_endpoint_auth_signing_alg"] == "none"
        ):
            raise AssertionError()
        return True
class RegistrationResponse(Message):
    """Response to client_register registration requests."""

    c_param = {
        "client_id": SINGLE_REQUIRED_STRING,
        "client_secret": SINGLE_OPTIONAL_STRING,
        "registration_access_token": SINGLE_OPTIONAL_STRING,
        "registration_client_uri": SINGLE_OPTIONAL_STRING,
        "client_id_issued_at": SINGLE_OPTIONAL_INT,
        "client_secret_expires_at": SINGLE_OPTIONAL_INT,
    }
    # The response echoes every registration request parameter.
    c_param.update(RegistrationRequest.c_param)

    def verify(self, **kwargs):
        """
        Verify that the response is valid.

        Implementations MUST either return both a Client Configuration Endpoint
        and a Registration Access Token or neither of them.

        :param kwargs:
        :return: True if the message is OK otherwise False
        """
        super(RegistrationResponse, self).verify(**kwargs)
        has_reg_uri = "registration_client_uri" in self
        has_reg_at = "registration_access_token" in self
        if has_reg_uri != has_reg_at:
            raise VerificationError(
                (
                    "Only one of registration_client_uri"
                    " and registration_access_token present"
                ),
                self,
            )
        return True
class ClientRegistrationErrorResponse(message.ErrorResponse):
    """Error response from the dynamic client registration endpoint."""

    # closed set of error codes the registration endpoint may return
    c_allowed_values = {
        "error": [
            "invalid_redirect_uri",
            "invalid_client_metadata",
            "invalid_configuration_parameter",
        ]
    }
class IdToken(OpenIDSchema):
    """ID Token claim set plus the validation rules applied to it."""

    c_param = OpenIDSchema.c_param.copy()
    c_param.update(
        {
            "iss": SINGLE_REQUIRED_STRING,
            "sub": SINGLE_REQUIRED_STRING,
            "aud": REQUIRED_LIST_OF_STRINGS,  # Array of strings or string
            "exp": SINGLE_REQUIRED_INT,
            "iat": SINGLE_REQUIRED_INT,
            "auth_time": SINGLE_OPTIONAL_INT,
            "nonce": SINGLE_OPTIONAL_STRING,
            "at_hash": SINGLE_OPTIONAL_STRING,
            "c_hash": SINGLE_OPTIONAL_STRING,
            "acr": SINGLE_OPTIONAL_STRING,
            "amr": OPTIONAL_LIST_OF_STRINGS,
            "azp": SINGLE_OPTIONAL_STRING,
            "sub_jwk": SINGLE_OPTIONAL_STRING,
        }
    )

    def verify(self, **kwargs):
        """Check issuer, audience/azp consistency and exp/iat freshness.

        Recognised kwargs: ``iss``, ``client_id``, ``skew`` (seconds of
        tolerated clock skew) and ``nonce_storage_time`` (max accepted age
        of ``iat``, defaulting to NONCE_STORAGE_TIME).
        """
        super(IdToken, self).verify(**kwargs)
        # issuer must match the expected one when the caller supplies it
        try:
            if kwargs["iss"] != self["iss"]:
                raise IssuerMismatch("{} != {}".format(kwargs["iss"], self["iss"]))
        except KeyError:
            pass
        if "aud" in self:
            if "client_id" in kwargs:
                # check that I'm among the recipients
                if kwargs["client_id"] not in self["aud"]:
                    raise NotForMe(
                        "{} not in aud:{}".format(kwargs["client_id"], self["aud"]),
                        self,
                    )
            # Then azp has to be present and be one of the aud values
            if len(self["aud"]) > 1:
                if "azp" not in self:
                    raise VerificationError("azp missing", self)
                if self["azp"] not in self["aud"]:
                    raise VerificationError("Mismatch between azp and aud claims", self)
        if "azp" in self:
            if "client_id" in kwargs:
                if kwargs["client_id"] != self["azp"]:
                    raise NotForMe(
                        "{} != azp:{}".format(kwargs["client_id"], self["azp"]), self
                    )
        _now = time_util.utc_time_sans_frac()
        try:
            _skew = kwargs["skew"]
        except KeyError:
            _skew = 0
        try:
            _exp = self["exp"]
        except KeyError:
            raise MissingRequiredAttribute("exp")
        else:
            # the token must not have expired (skew-tolerant)
            if (_now - _skew) > _exp:
                raise EXPError("Invalid expiration time")
        try:
            _storage_time = kwargs["nonce_storage_time"]
        except KeyError:
            _storage_time = NONCE_STORAGE_TIME
        try:
            _iat = self["iat"]
        except KeyError:
            raise MissingRequiredAttribute("iat")
        else:
            # iat must be recent enough, and must not lie in the future
            if (_iat + _storage_time) < (_now - _skew):
                raise IATError("Issued too long ago")
            if _now < (_iat - _skew):
                raise IATError("Issued in the future")
        # an expiry before the issue time is nonsensical
        if _exp < _iat:
            raise EXPError("Invalid expiration time")
        return True
class StateFullMessage(Message):
    """Base class for session-management messages that must carry `state`."""

    c_param = {"state": SINGLE_REQUIRED_STRING}


class RefreshSessionRequest(StateFullMessage):
    """Session refresh request carrying an id_token and a redirect_url."""

    c_param = StateFullMessage.c_param.copy()
    c_param.update(
        {"id_token": SINGLE_REQUIRED_STRING, "redirect_url": SINGLE_REQUIRED_STRING}
    )

    def verify(self, **kwargs):
        super(RefreshSessionRequest, self).verify(**kwargs)
        if "id_token" in self:
            # replace the raw JWT with the verified IdToken instance
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)


class RefreshSessionResponse(StateFullMessage):
    """Response to a session refresh; the id_token is verified in place."""

    c_param = StateFullMessage.c_param.copy()
    c_param.update({"id_token": SINGLE_REQUIRED_STRING})

    def verify(self, **kwargs):
        super(RefreshSessionResponse, self).verify(**kwargs)
        if "id_token" in self:
            # replace the raw JWT with the verified IdToken instance
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)


class CheckSessionRequest(Message):
    """Session status check; carries only an id_token, verified in place."""

    c_param = {"id_token": SINGLE_REQUIRED_STRING}

    def verify(self, **kwargs):
        super(CheckSessionRequest, self).verify(**kwargs)
        if "id_token" in self:
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)


class CheckIDRequest(Message):
    """Legacy CheckID request, identified by its access_token only."""

    c_param = {"access_token": SINGLE_REQUIRED_STRING}
class EndSessionRequest(Message):
    """RP-initiated logout request (all parameters optional)."""

    c_param = {
        "id_token_hint": SINGLE_OPTIONAL_STRING,
        "post_logout_redirect_uri": SINGLE_OPTIONAL_STRING,
        "state": SINGLE_OPTIONAL_STRING,
    }


class EndSessionResponse(Message):
    """Response to an end-session request; echoes `state` when given."""

    c_param = {"state": SINGLE_OPTIONAL_STRING}


class Claims(Message):
    """Free-form container for individual claim requests."""

    pass


class ClaimsRequest(Message):
    """The `claims` authorization parameter: userinfo and/or id_token parts."""

    c_param = {
        "userinfo": OPTIONAL_MULTIPLE_Claims,
        "id_token": OPTIONAL_MULTIPLE_Claims,
    }


class OpenIDRequest(AuthorizationRequest):
    """Request-object variant of the authorization request (same schema)."""

    pass
class ProviderConfigurationResponse(Message):
    """OP discovery metadata served from .well-known/openid-configuration."""

    c_param = {
        "issuer": SINGLE_REQUIRED_STRING,
        "authorization_endpoint": SINGLE_REQUIRED_STRING,
        "token_endpoint": SINGLE_OPTIONAL_STRING,
        "userinfo_endpoint": SINGLE_OPTIONAL_STRING,
        "jwks_uri": SINGLE_REQUIRED_STRING,
        "registration_endpoint": SINGLE_OPTIONAL_STRING,
        "scopes_supported": OPTIONAL_LIST_OF_STRINGS,
        "response_types_supported": REQUIRED_LIST_OF_STRINGS,
        "response_modes_supported": OPTIONAL_LIST_OF_STRINGS,
        "grant_types_supported": OPTIONAL_LIST_OF_STRINGS,
        "acr_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "subject_types_supported": REQUIRED_LIST_OF_STRINGS,
        "id_token_signing_alg_values_supported": REQUIRED_LIST_OF_STRINGS,
        "id_token_encryption_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "id_token_encryption_enc_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "userinfo_signing_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "userinfo_encryption_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "userinfo_encryption_enc_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "request_object_signing_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "request_object_encryption_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "request_object_encryption_enc_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "token_endpoint_auth_methods_supported": OPTIONAL_LIST_OF_STRINGS,
        "token_endpoint_auth_signing_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "display_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "claim_types_supported": OPTIONAL_LIST_OF_STRINGS,
        "claims_supported": OPTIONAL_LIST_OF_STRINGS,
        "service_documentation": SINGLE_OPTIONAL_STRING,
        "claims_locales_supported": OPTIONAL_LIST_OF_STRINGS,
        "ui_locales_supported": OPTIONAL_LIST_OF_STRINGS,
        "claims_parameter_supported": SINGLE_OPTIONAL_BOOLEAN,
        "request_parameter_supported": SINGLE_OPTIONAL_BOOLEAN,
        "request_uri_parameter_supported": SINGLE_OPTIONAL_BOOLEAN,
        "require_request_uri_registration": SINGLE_OPTIONAL_BOOLEAN,
        "op_policy_uri": SINGLE_OPTIONAL_STRING,
        "op_tos_uri": SINGLE_OPTIONAL_STRING,
        "check_session_iframe": SINGLE_OPTIONAL_STRING,
        "end_session_endpoint": SINGLE_OPTIONAL_STRING,
        "frontchannel_logout_supported": SINGLE_OPTIONAL_BOOLEAN,
        "frontchannel_logout_session_supported": SINGLE_OPTIONAL_BOOLEAN,
        "backchannel_logout_supported": SINGLE_OPTIONAL_BOOLEAN,
        "backchannel_logout_session_supported": SINGLE_OPTIONAL_BOOLEAN,
    }
    # defaults applied when the OP omits a parameter
    c_default = {
        "version": "3.0",
        "token_endpoint_auth_methods_supported": ["client_secret_basic"],
        "claims_parameter_supported": False,
        "request_parameter_supported": False,
        "request_uri_parameter_supported": True,
        "require_request_uri_registration": False,
        "grant_types_supported": ["authorization_code", "implicit"],
        "frontchannel_logout_supported": False,
        "frontchannel_logout_session_supported": False,
        "backchannel_logout_supported": False,
        "backchannel_logout_session_supported": False,
    }

    def verify(self, **kwargs):
        """Sanity-check provider metadata.

        The 'openid' scope must be advertised, scope values must stay in the
        RFC 6749 scope charset, the issuer must be a clean https URL and a
        token_endpoint must exist when any 'code' response type is supported.
        """
        super().verify(**kwargs)
        if "scopes_supported" in self:
            if "openid" not in self["scopes_supported"]:
                raise AssertionError()
            for scope in self["scopes_supported"]:
                check_char_set(scope, SCOPE_CHARSET)
        parts = urlparse(self["issuer"])
        if parts.scheme != "https":
            raise SchemeError("Not HTTPS")
        # the issuer identifier may not carry a query or fragment component
        if parts.query or parts.fragment:
            raise AssertionError()
        if (
            any("code" in rt for rt in self["response_types_supported"])
            and "token_endpoint" not in self
        ):
            raise MissingRequiredAttribute("token_endpoint")
        return True
class AuthnToken(Message):
    """JWT claim set used for JWT-based client authentication at the token endpoint."""

    c_param = {
        "iss": SINGLE_REQUIRED_STRING,
        "sub": SINGLE_REQUIRED_STRING,
        "aud": REQUIRED_LIST_OF_STRINGS,  # Array of strings or string
        "jti": SINGLE_REQUIRED_STRING,
        "exp": SINGLE_REQUIRED_INT,
        "iat": SINGLE_OPTIONAL_INT,
    }


# According to RFC 7519 all claims are optional
class JasonWebToken(Message):
    """Generic JWT claim set (RFC 7519 registered claims, all optional)."""

    c_param = {
        "iss": SINGLE_OPTIONAL_STRING,
        "sub": SINGLE_OPTIONAL_STRING,
        "aud": OPTIONAL_LIST_OF_STRINGS,  # Array of strings or string
        "exp": SINGLE_OPTIONAL_INT,
        "nbf": SINGLE_OPTIONAL_INT,
        "iat": SINGLE_OPTIONAL_INT,
        "jti": SINGLE_OPTIONAL_STRING,
    }
def jwt_deser(val, sformat="json"):
    """Deserialize a JasonWebToken claim set; urlencoded input is treated as JSON."""
    if sformat == "urlencoded":
        sformat = "json"
    # non-string dict payloads are JSON-encoded before deserialization
    if sformat in ("dict", "json") and not isinstance(val, str):
        val = json.dumps(val)
        sformat = "json"
    return JasonWebToken().deserialize(val, sformat)


# optional JWT-valued parameter built on the deserializer above
SINGLE_OPTIONAL_JWT = ParamDefinition(Message, False, msg_ser, jwt_deser, False)
class UserInfoErrorResponse(message.ErrorResponse):
    """Error response from the userinfo endpoint."""

    c_allowed_values = {
        "error": [
            "invalid_schema",
            "invalid_request",
            "invalid_token",
            "insufficient_scope",
        ]
    }


class DiscoveryRequest(Message):
    """Webfinger-style issuer discovery request."""

    c_param = {"principal": SINGLE_REQUIRED_STRING, "service": SINGLE_REQUIRED_STRING}


class DiscoveryResponse(Message):
    """Issuer discovery response: one or more provider locations."""

    c_param = {"locations": REQUIRED_LIST_OF_STRINGS}


class ResourceRequest(Message):
    """Generic protected-resource request carrying an optional access token."""

    c_param = {"access_token": SINGLE_OPTIONAL_STRING}
# Mapping from standard OIDC scope values to the claims they unlock.
SCOPE2CLAIMS: Dict[str, List[str]] = {
    "openid": ["sub"],
    "profile": [
        "name",
        "given_name",
        "family_name",
        "middle_name",
        "nickname",
        "profile",
        "picture",
        "website",
        "gender",
        "birthdate",
        "zoneinfo",
        "locale",
        "updated_at",
        "preferred_username",
    ],
    "email": ["email", "email_verified"],
    "address": ["address"],
    "phone": ["phone_number", "phone_number_verified"],
    "offline_access": [],
}

# LOGOUT related messages
SINGLE_OPTIONAL_JSON = ParamDefinition(dict, False, json_ser, json_deser, False)
SINGLE_REQUIRED_JSON = ParamDefinition(dict, True, json_ser, json_deser, False)

# the single member every back-channel logout token's "events" object must carry
BACK_CHANNEL_LOGOUT_EVENT = "http://schemas.openid.net/event/backchannel-logout"
class LogoutToken(Message):
    """Defined in https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken ."""

    c_param = {
        "iss": SINGLE_REQUIRED_STRING,
        "sub": SINGLE_OPTIONAL_STRING,
        "aud": REQUIRED_LIST_OF_STRINGS,  # Array of strings or string
        "iat": SINGLE_REQUIRED_INT,
        "jti": SINGLE_REQUIRED_STRING,
        "events": SINGLE_REQUIRED_JSON,
        "sid": SINGLE_OPTIONAL_STRING,
    }

    def verify(self, **kwargs):
        """Validate a back-channel logout token.

        Checks: no nonce, a single well-formed "events" member, presence of
        sub and/or sid, expected aud/iss when given, and a plausible iat.
        Recognised kwargs: ``aud``, ``iss``, ``skew``.
        """
        super().verify(**kwargs)
        # the spec forbids a nonce, to prevent the token passing as an ID Token
        if "nonce" in self:
            raise MessageException(
                '"nonce" is prohibited from appearing in a LogoutToken.'
            )
        # Check the 'events' JSON
        _keys = list(self["events"].keys())
        if len(_keys) != 1:
            raise ValueError('Must only be one member in "events"')
        if _keys[0] != BACK_CHANNEL_LOGOUT_EVENT:
            raise ValueError('Wrong member in "events"')
        if self["events"][_keys[0]] != {}:
            raise ValueError('Wrong member value in "events"')
        # There must be either a 'sub' or a 'sid', and may contain both
        if not ("sub" in self or "sid" in self):
            raise ValueError('There MUST be either a "sub" or a "sid"')
        try:
            if kwargs["aud"] not in self["aud"]:
                raise NotForMe("Not among intended audience")
        except KeyError:
            pass
        try:
            if kwargs["iss"] != self["iss"]:
                raise NotForMe("Wrong issuer")
        except KeyError:
            pass
        _now = utc_time_sans_frac()
        _skew = kwargs.get("skew", 0)
        _iat = self.get("iat", 0)
        # reject tokens issued in the future beyond the allowed clock skew
        if _iat and _iat > (_now + _skew):
            raise ValueError("Invalid issued_at time")
        return True
# kwargs recognised when verifying an ID Token.
# NOTE(review): nothing in this module references this list (the class below
# filters through TOKEN_VERIFY_ARGS instead) — presumably it is consumed by
# client/consumer code elsewhere in the package; confirm before removing.
ID_TOKEN_VERIFY_ARGS = [
    "keyjar",
    "verify",
    "encalg",
    "encenc",
    "sigalg",
    "issuer",
    "allow_missing_kid",
    "no_kid_issuer",
    "trusting",
    "skew",
    "nonce_storage_time",
    "client_id",
]
class BackChannelLogoutRequest(Message):
    """Defines the message used in https://openid.net/specs/openid-connect-backchannel-1_0.html ."""

    c_param = {"logout_token": SINGLE_REQUIRED_STRING}

    def verify(self, **kwargs):
        """Parse and verify the logout_token JWT, replacing it with a LogoutToken.

        :return: True when both JWT parsing and LogoutToken.verify succeed.
        """
        super().verify(**kwargs)
        # NOTE(review): kwargs are filtered through TOKEN_VERIFY_ARGS while
        # ID_TOKEN_VERIFY_ARGS defined just above is unused here — confirm
        # which argument list from_jwt() is meant to receive.
        args = {arg: kwargs[arg] for arg in TOKEN_VERIFY_ARGS if arg in kwargs}
        logout_token = LogoutToken().from_jwt(str(self["logout_token"]), **args)
        # semantic validation of the decoded claim set
        logout_token.verify(**kwargs)
        self["logout_token"] = logout_token
        logger.info("Verified Logout Token: {}".format(logout_token.to_dict()))
        return True
class FrontChannelLogoutRequest(Message):
    """Defines the message used in https://openid.net/specs/openid-connect-frontchannel-1_0.html ."""

    # presumably the issuer and the session id of the RP session being
    # terminated — confirm against the front-channel logout spec
    c_param = {"iss": SINGLE_OPTIONAL_STRING, "sid": SINGLE_OPTIONAL_STRING}
# Registry mapping message-type names to their classes; consumed by factory().
MSG = {
    "RefreshAccessTokenRequest": RefreshAccessTokenRequest,
    "TokenErrorResponse": TokenErrorResponse,
    "AccessTokenResponse": AccessTokenResponse,
    "UserInfoRequest": UserInfoRequest,
    "AuthorizationResponse": AuthorizationResponse,
    "AuthorizationErrorResponse": AuthorizationErrorResponse,
    "AuthorizationRequest": AuthorizationRequest,
    "AccessTokenRequest": AccessTokenRequest,
    "AddressClaim": AddressClaim,
    "OpenIDSchema": OpenIDSchema,
    "RegistrationRequest": RegistrationRequest,
    "RegistrationResponse": RegistrationResponse,
    "ClientRegistrationErrorResponse": ClientRegistrationErrorResponse,
    "IdToken": IdToken,
    "RefreshSessionRequest": RefreshSessionRequest,
    "RefreshSessionResponse": RefreshSessionResponse,
    "CheckSessionRequest": CheckSessionRequest,
    "CheckIDRequest": CheckIDRequest,
    "EndSessionRequest": EndSessionRequest,
    "EndSessionResponse": EndSessionResponse,
    "Claims": Claims,
    "OpenIDRequest": OpenIDRequest,
    "ProviderConfigurationResponse": ProviderConfigurationResponse,
    "AuthnToken": AuthnToken,
    "UserInfoErrorResponse": UserInfoErrorResponse,
    "DiscoveryRequest": DiscoveryRequest,
    "DiscoveryResponse": DiscoveryResponse,
    "ResourceRequest": ResourceRequest,
    # LOGOUT messages
    "LogoutToken": LogoutToken,
    "BackChannelLogoutRequest": BackChannelLogoutRequest,
    "FrontChannelLogoutRequest": FrontChannelLogoutRequest,
}
def factory(msgtype):
    """Return the Message subclass in this module named *msgtype*.

    Deprecated; falls back to the plain OAuth2 message factory when no
    OIDC class matches.
    """
    warnings.warn(
        "`factory` is deprecated. Use `OIDCMessageFactory` instead.", DeprecationWarning
    )
    current_module = sys.modules[__name__]
    for _, candidate in inspect.getmembers(current_module, inspect.isclass):
        if not issubclass(candidate, Message):
            continue
        if getattr(candidate, "__name__", None) == msgtype:
            return candidate
    # Fall back to basic OAuth2 messages
    return message.factory(msgtype)
class OIDCMessageFactory(MessageFactory):
    """Factory that knows OIDC message types."""

    # each tuple pairs the request class with the matching response class
    authorization_endpoint = MessageTuple(AuthorizationRequest, AuthorizationResponse)
    token_endpoint = MessageTuple(AccessTokenRequest, AccessTokenResponse)
    refresh_endpoint = MessageTuple(RefreshAccessTokenRequest, AccessTokenResponse)
    resource_endpoint = MessageTuple(ResourceRequest, Message)
    configuration_endpoint = MessageTuple(Message, ProviderConfigurationResponse)
    userinfo_endpoint = MessageTuple(UserInfoRequest, Message)
    registration_endpoint = MessageTuple(RegistrationRequest, RegistrationResponse)
    endsession_endpoint = MessageTuple(EndSessionRequest, EndSessionResponse)
    checkid_endpoint = MessageTuple(CheckIDRequest, IdToken)
    checksession_endpoint = MessageTuple(CheckSessionRequest, IdToken)
    refreshsession_endpoint = MessageTuple(
        RefreshSessionRequest, RefreshSessionResponse
    )
    discovery_endpoint = MessageTuple(DiscoveryRequest, DiscoveryResponse)
| ./CrossVul/dataset_final_sorted/CWE-347/py/good_4360_2 |
crossvul-python_data_good_1888_2 | null | ./CrossVul/dataset_final_sorted/CWE-347/py/good_1888_2 |
crossvul-python_data_bad_3997_0 | # -*- coding: utf-8 -*-
#
# fastecdsa documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 15 20:02:52 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
from datetime import datetime
import os
import sys
from unittest import mock
# Make the package importable both from the docs dir and the repo root.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))

# The compiled C extensions cannot be imported on the docs build host, so
# stub them out; autodoc only needs the pure-Python API surface.
MOCK_MODULES = ['fastecdsa._ecdsa', 'fastecdsa.curvemath']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Sphinx extension modules: math rendering plus API docs from docstrings.
extensions = ['sphinx.ext.mathjax', 'sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
source_suffix = ['.rst', '.md']

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'fastecdsa'
# NOTE: `copyright` shadows the builtin on purpose — Sphinx reads this name.
copyright = '{}, Anton Kueltz'.format(datetime.now().year)
author = 'Anton Kueltz'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.
html_theme = 'default'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'fastecdsadoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'fastecdsa.tex', 'fastecdsa Documentation',
     'Anton Kueltz', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'fastecdsa', 'fastecdsa Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'fastecdsa', 'fastecdsa Documentation',
     author, 'fastecdsa', 'One line description of project.',
     'Miscellaneous'),
]
| ./CrossVul/dataset_final_sorted/CWE-347/py/bad_3997_0 |
crossvul-python_data_bad_4360_2 | import inspect
import json
import logging
import sys
import time
import warnings
from typing import Dict
from typing import List
from urllib.parse import urlencode
from urllib.parse import urlparse
from jwkest import jws
from jwkest.jwe import JWEException
from jwkest.jwe import factory as JWE_factory
from jwkest.jwt import JWT
from oic.exception import InvalidRequest
from oic.exception import IssuerMismatch
from oic.exception import MessageException
from oic.exception import NotForMe
from oic.exception import PyoidcError
from oic.oauth2 import message
from oic.oauth2.exception import VerificationError
from oic.oauth2.message import OPTIONAL_LIST_OF_SP_SEP_STRINGS
from oic.oauth2.message import OPTIONAL_LIST_OF_STRINGS
from oic.oauth2.message import REQUIRED_LIST_OF_SP_SEP_STRINGS
from oic.oauth2.message import REQUIRED_LIST_OF_STRINGS
from oic.oauth2.message import SINGLE_OPTIONAL_INT
from oic.oauth2.message import SINGLE_OPTIONAL_JSON
from oic.oauth2.message import SINGLE_OPTIONAL_STRING
from oic.oauth2.message import SINGLE_REQUIRED_STRING
from oic.oauth2.message import Message
from oic.oauth2.message import MessageFactory
from oic.oauth2.message import MessageTuple
from oic.oauth2.message import MissingRequiredAttribute
from oic.oauth2.message import MissingRequiredValue
from oic.oauth2.message import NotAllowedValue
from oic.oauth2.message import ParamDefinition
from oic.oauth2.message import SchemeError
from oic.utils import time_util
from oic.utils.time_util import utc_time_sans_frac
__author__ = "rohe0002"

logger = logging.getLogger(__name__)

# Maximum accepted age (seconds) of an ID Token's iat; also the assumed
# nonce retention window.
NONCE_STORAGE_TIME = 4 * 3600


class AtHashError(VerificationError):
    """at_hash in the ID Token does not match the access token."""

    pass


class CHashError(VerificationError):
    """c_hash in the ID Token does not match the authorization code."""

    pass


class EXPError(VerificationError):
    """The token's expiration time (exp) is invalid or in the past."""

    pass


class IATError(VerificationError):
    """The token's issue time (iat) is too old or in the future."""

    pass
def json_ser(val, sformat=None, lev=0):
    """Serialize *val* as JSON text; sformat/lev exist only for interface parity."""
    return json.dumps(val)


def json_deser(val, sformat=None, lev=0):
    """Parse the JSON string *val* back into Python data."""
    return json.loads(val)
def json_conv(val, sformat=None, lev=0):
    """Rewrite None/True/False dict values as "none"/"true"/"false" in place."""
    if isinstance(val, dict):
        for key in val:
            current = val[key]
            # identity checks on purpose: 1 and 0 must not match True/False
            if current is None:
                val[key] = "none"
            elif current is True:
                val[key] = "true"
            elif current is False:
                val[key] = "false"
    return val
def json_rest(val, sformat=None, lev=0):
    """Inverse of json_conv: restore None/True/False from their string forms, in place."""
    if isinstance(val, dict):
        for key in val:
            current = val[key]
            if current == "none":
                val[key] = None
            elif current == "true":
                val[key] = True
            elif current == "false":
                val[key] = False
    return val
# ParamDefinition fields: value type, required, serializer, deserializer,
# null value allowed
SINGLE_OPTIONAL_BOOLEAN = ParamDefinition(bool, False, None, None, False)
SINGLE_OPTIONAL_JSON_WN = ParamDefinition(dict, False, json_ser, json_deser, True)
SINGLE_OPTIONAL_JSON_CONV = ParamDefinition(dict, False, json_conv, json_rest, True)
SINGLE_REQUIRED_INT = ParamDefinition(int, True, None, None, False)
def idtoken_deser(val, sformat="urlencoded"):
    """Deserialize an IdToken; regardless of *sformat*, id_tokens always arrive as JWTs."""
    return IdToken().deserialize(val, "jwt")
def address_deser(val, sformat="urlencoded"):
    """Deserialize an AddressClaim, JSON-encoding non-string dict/json input first."""
    if sformat in ("dict", "json"):
        if not isinstance(val, str):
            val = json.dumps(val)
            sformat = "json"
    elif sformat == "dict":
        # unreachable ("dict" is caught above); kept for parity with the original
        sformat = "json"
    return AddressClaim().deserialize(val, sformat)
def claims_deser(val, sformat="urlencoded"):
    """Deserialize a Claims object, JSON-encoding non-string dict/json input first."""
    if sformat in ("dict", "json") and not isinstance(val, str):
        val = json.dumps(val)
        sformat = "json"
    return Claims().deserialize(val, sformat)
def message_deser(val, sformat="urlencoded"):
    """Deserialize a generic Message, JSON-encoding non-string dict/json input first."""
    if sformat in ("dict", "json") and not isinstance(val, str):
        val = json.dumps(val)
        sformat = "json"
    return Message().deserialize(val, sformat)
def msg_ser(inst, sformat, lev=0):
    """Serialize *inst* (a Message, dict or raw string) in the requested format.

    Raises MessageException for unserializable types in "dict" mode and
    PyoidcError for an unknown *sformat*.
    """
    if sformat in ("urlencoded", "json"):
        return inst.serialize(sformat, lev) if isinstance(inst, Message) else inst
    if sformat == "dict":
        if isinstance(inst, Message):
            return inst.serialize(sformat, lev)
        if isinstance(inst, (dict, str)):  # str iff ID Token
            return inst
        raise MessageException("Wrong type: %s" % type(inst))
    raise PyoidcError("Unknown sformat", inst)
def msg_ser_json(inst, sformat="json", lev=0):
    """Serialize *inst* as JSON text, or as a plain dict when nested (lev > 0)."""
    # sformat is effectively "json" except when a nesting level forces "dict"
    if lev:
        sformat = "dict"
    if sformat != "dict":
        sformat = "json"
        return inst.serialize(sformat, lev) if isinstance(inst, Message) else inst
    if isinstance(inst, Message):
        return inst.serialize(sformat, lev)
    if isinstance(inst, dict):
        return inst
    raise MessageException("Wrong type: %s" % type(inst))
def msg_list_ser(insts, sformat, lev=0):
    """Serialize every element of *insts* with msg_ser, preserving order."""
    serialized = []
    for entry in insts:
        serialized.append(msg_ser(entry, sformat, lev))
    return serialized
def claims_ser(val, sformat="urlencoded", lev=0):
    """Serialize a claims value (Message, dict or list thereof) in *sformat*."""
    # everything in c_extension; a list is represented by its first element
    item = val[0] if isinstance(val, list) else val
    if isinstance(item, Message):
        return item.serialize(method=sformat, lev=lev + 1)
    if sformat == "urlencoded":
        # only a mapping can be urlencoded
        assert isinstance(item, dict)  # nosec
        return urlencode(item)
    if sformat == "json":
        return item if lev else json.dumps(item)
    if sformat == "dict":
        if not isinstance(item, dict):
            raise MessageException("Wrong type: %s" % type(item))
        return item
    raise PyoidcError("Unknown sformat: %s" % sformat, val)
def registration_request_deser(val, sformat="urlencoded"):
    """Deserialize a RegistrationRequest, JSON-encoding non-string dict/json input first."""
    if sformat in ("dict", "json") and not isinstance(val, str):
        val = json.dumps(val)
        sformat = "json"
    return RegistrationRequest().deserialize(val, sformat)
def claims_request_deser(val, sformat="json"):
    """Deserialize a ClaimsRequest; urlencoded input never occurs, treat it as JSON."""
    # never 'urlencoded'
    if sformat == "urlencoded":
        sformat = "json"
    if sformat in ("dict", "json") and not isinstance(val, str):
        val = json.dumps(val)
        sformat = "json"
    return ClaimsRequest().deserialize(val, sformat)
# Parameter definitions built on the (de)serializers above.
# ParamDefinition fields: value type, required, serializer, deserializer,
# null value allowed.
OPTIONAL_ADDRESS = ParamDefinition(Message, False, msg_ser, address_deser, False)
OPTIONAL_LOGICAL = ParamDefinition(bool, False, None, None, False)
OPTIONAL_MULTIPLE_Claims = ParamDefinition(
    Message, False, claims_ser, claims_deser, False
)
SINGLE_OPTIONAL_IDTOKEN = ParamDefinition(str, False, msg_ser, None, False)
SINGLE_OPTIONAL_REGISTRATION_REQUEST = ParamDefinition(
    Message, False, msg_ser, registration_request_deser, False
)
SINGLE_OPTIONAL_CLAIMSREQ = ParamDefinition(
    Message, False, msg_ser_json, claims_request_deser, False
)
OPTIONAL_MESSAGE = ParamDefinition(Message, False, msg_ser, message_deser, False)
REQUIRED_MESSAGE = ParamDefinition(Message, True, msg_ser, message_deser, False)
# ----------------------------------------------------------------------------
# Characters allowed in an OAuth2 "scope" value, per RFC 6749 section 3.3:
# %x21 / %x23-5B / %x5D-7E (printable ASCII minus '"' and '\').
SCOPE_CHARSET = []
for char in ["\x21", ("\x23", "\x5b"), ("\x5d", "\x7E")]:
    if isinstance(char, tuple):
        # expand the inclusive character range
        c = char[0]
        while c <= char[1]:
            SCOPE_CHARSET.append(c)
            c = chr(ord(c) + 1)
    else:
        # BUG FIX: previously this appended the builtin `set` type instead of
        # the character itself, so "\x21" ('!') was never accepted in scopes.
        SCOPE_CHARSET.append(char)
def check_char_set(string, allowed):
    """Raise NotAllowedValue if any character of *string* is outside *allowed*."""
    for ch in string:
        if ch not in allowed:
            raise NotAllowedValue("'%c' not in the allowed character set" % ch)
# kwargs forwarded to Message.from_jwt when parsing/verifying signed messages
TOKEN_VERIFY_ARGS = ["key", "keyjar", "algs", "sender"]
def verify_id_token(instance, check_hash=False, **kwargs):
    """Decrypt (if needed), parse and verify the id_token held by *instance*.

    :param instance: a Message carrying an "id_token" entry (raw JWT string)
    :param check_hash: also verify at_hash/c_hash against access_token/code
    :param kwargs: verification parameters; TOKEN_VERIFY_ARGS entries are
        forwarded to from_jwt, the rest to IdToken.verify
    :return: the verified IdToken instance
    """
    # Try to decode the JWT, checks the signature
    args = {}
    for arg in TOKEN_VERIFY_ARGS:
        try:
            args[arg] = kwargs[arg]
        except KeyError:
            pass
    _jws = str(instance["id_token"])
    # It can be encrypted, so try to decrypt first
    _jwe = JWE_factory(_jws)
    if _jwe is not None:
        try:
            _jws = _jwe.decrypt(keys=kwargs["keyjar"].get_decrypt_key())
        except JWEException as err:
            raise VerificationError("Could not decrypt id_token", err)
    _packer = JWT()
    _body = _packer.unpack(_jws).payload()
    # when a keyjar is available, the issuer must be known to it
    if "keyjar" in kwargs:
        try:
            if _body["iss"] not in kwargs["keyjar"]:
                raise ValueError("Unknown issuer")
        except KeyError:
            raise MissingRequiredAttribute("iss")
    if _jwe is not None:
        # Use the original encrypted token to set correct headers
        idt = IdToken().from_jwt(str(instance["id_token"]), **args)
    else:
        idt = IdToken().from_jwt(_jws, **args)
    if not idt.verify(**kwargs):
        raise VerificationError("Could not verify id_token", idt)
    if check_hash:
        _alg = idt.jws_header["alg"]
        # What if _alg == 'none'
        # hash function is derived from the signing alg's bit length (e.g. RS256 -> HS256 left-hash)
        hfunc = "HS" + _alg[-3:]
        if "access_token" in instance:
            if "at_hash" not in idt:
                raise MissingRequiredAttribute("Missing at_hash property", idt)
            if idt["at_hash"] != jws.left_hash(instance["access_token"], hfunc):
                raise AtHashError("Failed to verify access_token hash", idt)
        if "code" in instance:
            if "c_hash" not in idt:
                raise MissingRequiredAttribute("Missing c_hash property", idt)
            if idt["c_hash"] != jws.left_hash(instance["code"], hfunc):
                raise CHashError("Failed to verify code hash", idt)
    return idt
# -----------------------------------------------------------------------------
class RefreshAccessTokenRequest(message.RefreshAccessTokenRequest):
    """OIDC alias of the plain OAuth2 refresh-token request."""

    pass


class TokenErrorResponse(message.TokenErrorResponse):
    """OIDC alias of the plain OAuth2 token error response."""

    pass


class AccessTokenResponse(message.AccessTokenResponse):
    """OAuth2 token response extended with an optional OIDC id_token."""

    c_param = message.AccessTokenResponse.c_param.copy()
    c_param.update({"id_token": SINGLE_OPTIONAL_STRING})

    def verify(self, **kwargs):
        super().verify(**kwargs)
        if "id_token" in self:
            # replace the JWT with the verified IdToken instance
            self["id_token"] = verify_id_token(self, **kwargs)
        return True


class UserInfoRequest(Message):
    """Userinfo endpoint request; the access token is optional here because it
    may instead travel in the Authorization header."""

    c_param = {"access_token": SINGLE_OPTIONAL_STRING}
class AuthorizationResponse(message.AuthorizationResponse, message.AccessTokenResponse):
    """Authorization response, possibly carrying token and id_token (hybrid/implicit flows)."""

    c_param = message.AuthorizationResponse.c_param.copy()
    c_param.update(message.AccessTokenResponse.c_param)
    c_param.update(
        {
            "code": SINGLE_OPTIONAL_STRING,
            "access_token": SINGLE_OPTIONAL_STRING,
            "token_type": SINGLE_OPTIONAL_STRING,
            "id_token": SINGLE_OPTIONAL_IDTOKEN,
        }
    )

    def verify(self, **kwargs):
        """Verify audience, id_token (incl. c_hash/at_hash) and token_type presence."""
        super().verify(**kwargs)
        if "aud" in self:
            if "client_id" in kwargs:
                # check that it's for me
                if kwargs["client_id"] not in self["aud"]:
                    return False
        if "id_token" in self:
            # check_hash=True: validate c_hash/at_hash against code/access_token
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)
        if "access_token" in self:
            if "token_type" not in self:
                raise MissingRequiredValue("Missing token_type parameter", self)
        return True
class AuthorizationErrorResponse(message.AuthorizationErrorResponse):
    """OAuth2 authorization error response extended with the OIDC error codes."""

    # NOTE(review): .copy() is a shallow dict copy, so the .extend() below
    # mutates the same "error" list object the OAuth2 base class holds —
    # confirm whether widening the base class's allowed errors is intended.
    c_allowed_values = message.AuthorizationErrorResponse.c_allowed_values.copy()
    c_allowed_values["error"].extend(
        [
            "interaction_required",
            "login_required",
            "session_selection_required",
            "consent_required",
            "invalid_request_uri",
            "invalid_request_object",
            "registration_not_supported",
            "request_not_supported",
            "request_uri_not_supported",
        ]
    )
class AuthorizationRequest(message.AuthorizationRequest):
    """OIDC authorization request: OAuth2 request plus OIDC-specific parameters."""

    c_param = message.AuthorizationRequest.c_param.copy()
    c_param.update(
        {
            "scope": REQUIRED_LIST_OF_SP_SEP_STRINGS,
            "redirect_uri": SINGLE_REQUIRED_STRING,
            "nonce": SINGLE_OPTIONAL_STRING,
            "display": SINGLE_OPTIONAL_STRING,
            "prompt": OPTIONAL_LIST_OF_STRINGS,
            "max_age": SINGLE_OPTIONAL_INT,
            "ui_locales": OPTIONAL_LIST_OF_SP_SEP_STRINGS,
            "claims_locales": OPTIONAL_LIST_OF_SP_SEP_STRINGS,
            "id_token_hint": SINGLE_OPTIONAL_STRING,
            "login_hint": SINGLE_OPTIONAL_STRING,
            "acr_values": OPTIONAL_LIST_OF_SP_SEP_STRINGS,
            "claims": SINGLE_OPTIONAL_CLAIMSREQ,
            "registration": SINGLE_OPTIONAL_JSON,
            "request": SINGLE_OPTIONAL_STRING,
            "request_uri": SINGLE_OPTIONAL_STRING,
            "response_mode": SINGLE_OPTIONAL_STRING,
        }
    )
    c_allowed_values = message.AuthorizationRequest.c_allowed_values.copy()
    c_allowed_values.update(
        {
            "display": ["page", "popup", "touch", "wap"],
            "prompt": ["none", "login", "consent", "select_account"],
        }
    )

    def verify(self, **kwargs):
        """
        Check that the request is valid.

        Authorization Request parameters that are OPTIONAL in the OAuth 2.0
        specification MAY be included in the OpenID Request Object without also
        passing them as OAuth 2.0 Authorization Request parameters, with one
        exception: The scope parameter MUST always be present in OAuth 2.0
        Authorization Request parameters.

        All parameter values that are present both in the OAuth 2.0
        Authorization Request and in the OpenID Request Object MUST exactly match.
        """
        super().verify(**kwargs)
        args = {}
        for arg in ["key", "keyjar", "opponent_id", "sender"]:
            try:
                args[arg] = kwargs[arg]
            except KeyError:
                pass
        # default the expected signer of the request object to this client
        if "opponent_id" not in kwargs:
            args["opponent_id"] = self["client_id"]
        if "request" in self:
            if isinstance(self["request"], str):
                # Try to decode the JWT, checks the signature
                oidr = OpenIDRequest().from_jwt(str(self["request"]), **args)
                # verify that nothing is change in the original message
                for key, val in oidr.items():
                    if key in self and self[key] != val:
                        raise AssertionError()
                # replace the JWT with the parsed and verified instance
                self["request"] = oidr
        if "id_token_hint" in self:
            if isinstance(self["id_token_hint"], str):
                idt = IdToken().from_jwt(str(self["id_token_hint"]), **args)
                self["id_token_hint"] = idt
        if "response_type" not in self:
            raise MissingRequiredAttribute("response_type missing", self)
        _rt = self["response_type"]
        # implicit/hybrid flows require a nonce
        if "token" in _rt or "id_token" in _rt:
            if "nonce" not in self:
                raise MissingRequiredAttribute("Nonce missing", self)
        if "openid" not in self.get("scope", []):
            raise MissingRequiredValue("openid not in scope", self)
        # offline_access requires explicit consent
        if "offline_access" in self.get("scope", []):
            if "prompt" not in self or "consent" not in self["prompt"]:
                raise MissingRequiredValue("consent in prompt", self)
        if "prompt" in self:
            if "none" in self["prompt"] and len(self["prompt"]) > 1:
                raise InvalidRequest("prompt none combined with other value", self)
        return True
class AccessTokenRequest(message.AccessTokenRequest):
    """OIDC access token request; extends OAuth2 with JWT client assertions."""

    c_param = message.AccessTokenRequest.c_param.copy()
    c_param.update(
        {
            "client_assertion_type": SINGLE_OPTIONAL_STRING,
            "client_assertion": SINGLE_OPTIONAL_STRING,
        }
    )
    # Code flow is the default grant type.
    c_default = {"grant_type": "authorization_code"}
    c_allowed_values = {
        "client_assertion_type": [
            "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
        ]
    }
class AddressClaim(Message):
    """Postal address claim structure (OIDC Core section 5.1.1)."""

    c_param = {
        "formatted": SINGLE_OPTIONAL_STRING,
        "street_address": SINGLE_OPTIONAL_STRING,
        "locality": SINGLE_OPTIONAL_STRING,
        "region": SINGLE_OPTIONAL_STRING,
        "postal_code": SINGLE_OPTIONAL_STRING,
        "country": SINGLE_OPTIONAL_STRING,
    }
class OpenIDSchema(Message):
    """Standard OpenID Connect userinfo claim set (OIDC Core section 5.1)."""

    c_param = {
        "sub": SINGLE_REQUIRED_STRING,
        "name": SINGLE_OPTIONAL_STRING,
        "given_name": SINGLE_OPTIONAL_STRING,
        "family_name": SINGLE_OPTIONAL_STRING,
        "middle_name": SINGLE_OPTIONAL_STRING,
        "nickname": SINGLE_OPTIONAL_STRING,
        "preferred_username": SINGLE_OPTIONAL_STRING,
        "profile": SINGLE_OPTIONAL_STRING,
        "picture": SINGLE_OPTIONAL_STRING,
        "website": SINGLE_OPTIONAL_STRING,
        "email": SINGLE_OPTIONAL_STRING,
        "email_verified": SINGLE_OPTIONAL_BOOLEAN,
        "gender": SINGLE_OPTIONAL_STRING,
        "birthdate": SINGLE_OPTIONAL_STRING,
        "zoneinfo": SINGLE_OPTIONAL_STRING,
        "locale": SINGLE_OPTIONAL_STRING,
        "phone_number": SINGLE_OPTIONAL_STRING,
        "phone_number_verified": SINGLE_OPTIONAL_BOOLEAN,
        "address": OPTIONAL_ADDRESS,
        "updated_at": SINGLE_OPTIONAL_INT,
        "_claim_names": OPTIONAL_MESSAGE,
        "_claim_sources": OPTIONAL_MESSAGE,
    }

    def verify(self, **kwargs):
        """
        Verify the claim set.

        Accepts ``birthdate`` as YYYY-MM-DD, YYYY, or 0000-MM-DD (year
        withheld).  Returns False when any claim value is None.

        :raises VerificationError: if the birthdate matches no accepted format.
        :return: True when valid, False when a None value is present.
        """
        super().verify(**kwargs)

        if "birthdate" in self:
            # Stop at the first format that parses; raise if none do.
            for date_format in ("%Y-%m-%d", "%Y", "0000-%m-%d"):
                try:
                    time.strptime(self["birthdate"], date_format)
                except ValueError:
                    continue
                break
            else:
                raise VerificationError("Birthdate format error", self)

        if any(claim_value is None for claim_value in self.values()):
            return False
        return True
class RegistrationRequest(Message):
    """Dynamic client registration request (OIDC Registration section 2)."""

    c_param = {
        "redirect_uris": REQUIRED_LIST_OF_STRINGS,
        "response_types": OPTIONAL_LIST_OF_STRINGS,
        "grant_types": OPTIONAL_LIST_OF_STRINGS,
        "application_type": SINGLE_OPTIONAL_STRING,
        "contacts": OPTIONAL_LIST_OF_STRINGS,
        "client_name": SINGLE_OPTIONAL_STRING,
        "logo_uri": SINGLE_OPTIONAL_STRING,
        "client_uri": SINGLE_OPTIONAL_STRING,
        "policy_uri": SINGLE_OPTIONAL_STRING,
        "tos_uri": SINGLE_OPTIONAL_STRING,
        "jwks": SINGLE_OPTIONAL_STRING,
        "jwks_uri": SINGLE_OPTIONAL_STRING,
        "sector_identifier_uri": SINGLE_OPTIONAL_STRING,
        "subject_type": SINGLE_OPTIONAL_STRING,
        "id_token_signed_response_alg": SINGLE_OPTIONAL_STRING,
        "id_token_encrypted_response_alg": SINGLE_OPTIONAL_STRING,
        "id_token_encrypted_response_enc": SINGLE_OPTIONAL_STRING,
        "userinfo_signed_response_alg": SINGLE_OPTIONAL_STRING,
        "userinfo_encrypted_response_alg": SINGLE_OPTIONAL_STRING,
        "userinfo_encrypted_response_enc": SINGLE_OPTIONAL_STRING,
        "request_object_signing_alg": SINGLE_OPTIONAL_STRING,
        "request_object_encryption_alg": SINGLE_OPTIONAL_STRING,
        "request_object_encryption_enc": SINGLE_OPTIONAL_STRING,
        "token_endpoint_auth_method": SINGLE_OPTIONAL_STRING,
        "token_endpoint_auth_signing_alg": SINGLE_OPTIONAL_STRING,
        "default_max_age": SINGLE_OPTIONAL_INT,
        "require_auth_time": OPTIONAL_LOGICAL,
        "default_acr_values": OPTIONAL_LIST_OF_STRINGS,
        "initiate_login_uri": SINGLE_OPTIONAL_STRING,
        "request_uris": OPTIONAL_LIST_OF_STRINGS,
        "post_logout_redirect_uris": OPTIONAL_LIST_OF_STRINGS,
        "frontchannel_logout_uri": SINGLE_OPTIONAL_STRING,
        "frontchannel_logout_session_required": OPTIONAL_LOGICAL,
        "backchannel_logout_uri": SINGLE_OPTIONAL_STRING,
        "backchannel_logout_session_required": OPTIONAL_LOGICAL,
    }
    c_default = {"application_type": "web", "response_types": ["code"]}
    c_allowed_values = {
        "application_type": ["native", "web"],
        "subject_type": ["public", "pairwise"],
    }

    def verify(self, **kwargs):
        """
        Verify the registration request.

        Checks that initiate_login_uri is https, that encryption alg/enc
        parameters are paired (defaulting enc to A128CBC-HS256 when only
        the alg is given), and rejects alg "none" for token endpoint auth.

        :raises AssertionError: if any of the above constraints is violated.
        :return: True if the request is valid.
        """
        super().verify(**kwargs)
        if "initiate_login_uri" in self and not self["initiate_login_uri"].startswith(
            "https:"
        ):
            raise AssertionError()
        for param in [
            "request_object_encryption",
            "id_token_encrypted_response",
            "userinfo_encrypted_response",
        ]:
            alg_param = "%s_alg" % param
            enc_param = "%s_enc" % param
            if alg_param in self:
                if enc_param not in self:
                    # Spec default when only the alg is registered.
                    self[enc_param] = "A128CBC-HS256"
            # both or none
            if enc_param in self and alg_param not in self:
                raise AssertionError()
        if (
            "token_endpoint_auth_signing_alg" in self
            and self["token_endpoint_auth_signing_alg"] == "none"
        ):
            raise AssertionError()
        return True
class RegistrationResponse(Message):
    """Response to client_register registration requests."""

    c_param = {
        "client_id": SINGLE_REQUIRED_STRING,
        "client_secret": SINGLE_OPTIONAL_STRING,
        "registration_access_token": SINGLE_OPTIONAL_STRING,
        "registration_client_uri": SINGLE_OPTIONAL_STRING,
        "client_id_issued_at": SINGLE_OPTIONAL_INT,
        "client_secret_expires_at": SINGLE_OPTIONAL_INT,
    }
    c_param.update(RegistrationRequest.c_param)

    def verify(self, **kwargs):
        """
        Verify that the response is valid.

        Implementations MUST either return both a Client Configuration Endpoint
        and a Registration Access Token or neither of them.

        :param kwargs: Passed through to the base-class verification.
        :raises VerificationError: if only one of the pair is present.
        :return: True if the message is OK otherwise False
        """
        super(RegistrationResponse, self).verify(**kwargs)

        # The two fields must appear together or not at all.
        if ("registration_client_uri" in self) != (
            "registration_access_token" in self
        ):
            raise VerificationError(
                (
                    "Only one of registration_client_uri"
                    " and registration_access_token present"
                ),
                self,
            )
        return True
class ClientRegistrationErrorResponse(message.ErrorResponse):
    """Error response from the client registration endpoint."""

    c_allowed_values = {
        "error": [
            "invalid_redirect_uri",
            "invalid_client_metadata",
            "invalid_configuration_parameter",
        ]
    }
class IdToken(OpenIDSchema):
    """OpenID Connect ID Token claims (OIDC Core section 2)."""

    c_param = OpenIDSchema.c_param.copy()
    c_param.update(
        {
            "iss": SINGLE_REQUIRED_STRING,
            "sub": SINGLE_REQUIRED_STRING,
            "aud": REQUIRED_LIST_OF_STRINGS,  # Array of strings or string
            "exp": SINGLE_REQUIRED_INT,
            "iat": SINGLE_REQUIRED_INT,
            "auth_time": SINGLE_OPTIONAL_INT,
            "nonce": SINGLE_OPTIONAL_STRING,
            "at_hash": SINGLE_OPTIONAL_STRING,
            "c_hash": SINGLE_OPTIONAL_STRING,
            "acr": SINGLE_OPTIONAL_STRING,
            "amr": OPTIONAL_LIST_OF_STRINGS,
            "azp": SINGLE_OPTIONAL_STRING,
            "sub_jwk": SINGLE_OPTIONAL_STRING,
        }
    )

    def verify(self, **kwargs):
        """
        Validate the ID Token claims (OIDC Core section 3.1.3.7).

        :param kwargs: May carry "iss" (expected issuer), "client_id"
            (expected audience), "skew" (allowed clock skew in seconds) and
            "nonce_storage_time" (max accepted token age in seconds).
        :raises IssuerMismatch: if "iss" does not match the expected issuer.
        :raises NotForMe: if we are not in "aud" or "azp" is another client.
        :raises VerificationError: on azp/aud inconsistencies.
        :raises EXPError: if the token has expired.
        :raises IATError: if the token was issued too long ago.
        :return: True if all checks pass.
        """
        super(IdToken, self).verify(**kwargs)
        try:
            if kwargs["iss"] != self["iss"]:
                raise IssuerMismatch("{} != {}".format(kwargs["iss"], self["iss"]))
        except KeyError:
            pass
        if "aud" in self:
            if "client_id" in kwargs:
                # check that I'm among the recipients
                if kwargs["client_id"] not in self["aud"]:
                    raise NotForMe(
                        "{} not in aud:{}".format(kwargs["client_id"], self["aud"]),
                        self,
                    )
            # Then azp has to be present and be one of the aud values
            if len(self["aud"]) > 1:
                if "azp" not in self:
                    raise VerificationError("azp missing", self)
                if self["azp"] not in self["aud"]:
                    raise VerificationError("Mismatch between azp and aud claims", self)
        if "azp" in self:
            if "client_id" in kwargs:
                if kwargs["client_id"] != self["azp"]:
                    raise NotForMe(
                        "{} != azp:{}".format(kwargs["client_id"], self["azp"]), self
                    )
        _now = time_util.utc_time_sans_frac()
        try:
            _skew = kwargs["skew"]
        except KeyError:
            _skew = 0
        try:
            _exp = self["exp"]
        except KeyError:
            raise MissingRequiredAttribute("exp")
        else:
            # Expired tokens are rejected, allowing for the configured skew.
            if (_now - _skew) > _exp:
                raise EXPError("Invalid expiration time")
        try:
            _storage_time = kwargs["nonce_storage_time"]
        except KeyError:
            _storage_time = NONCE_STORAGE_TIME
        try:
            _iat = self["iat"]
        except KeyError:
            raise MissingRequiredAttribute("iat")
        else:
            # Reject tokens older than the nonce storage window, to limit
            # how long a captured token can be replayed.
            if (_iat + _storage_time) < (_now - _skew):
                raise IATError("Issued too long ago")
        return True
class StateFullMessage(Message):
    """Base class for messages that must carry a "state" parameter."""

    c_param = {"state": SINGLE_REQUIRED_STRING}
class RefreshSessionRequest(StateFullMessage):
    """Session refresh request carrying an ID Token and redirect URL."""

    c_param = StateFullMessage.c_param.copy()
    c_param.update(
        {"id_token": SINGLE_REQUIRED_STRING, "redirect_url": SINGLE_REQUIRED_STRING}
    )

    def verify(self, **kwargs):
        super(RefreshSessionRequest, self).verify(**kwargs)
        if "id_token" in self:
            # Full cryptographic verification, including at_hash/c_hash.
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)
class RefreshSessionResponse(StateFullMessage):
    """Session refresh response carrying a new ID Token."""

    c_param = StateFullMessage.c_param.copy()
    c_param.update({"id_token": SINGLE_REQUIRED_STRING})

    def verify(self, **kwargs):
        super(RefreshSessionResponse, self).verify(**kwargs)
        if "id_token" in self:
            # Full cryptographic verification, including at_hash/c_hash.
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)
class CheckSessionRequest(Message):
    """Session-check request: an ID Token to be validated."""

    c_param = {"id_token": SINGLE_REQUIRED_STRING}

    def verify(self, **kwargs):
        super(CheckSessionRequest, self).verify(**kwargs)
        if "id_token" in self:
            # Full cryptographic verification, including at_hash/c_hash.
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)
class CheckIDRequest(Message):
    """Check-ID request: just the access token."""

    c_param = {"access_token": SINGLE_REQUIRED_STRING}
class EndSessionRequest(Message):
    """RP-initiated logout request (OIDC Session Management)."""

    c_param = {
        "id_token_hint": SINGLE_OPTIONAL_STRING,
        "post_logout_redirect_uri": SINGLE_OPTIONAL_STRING,
        "state": SINGLE_OPTIONAL_STRING,
    }
class EndSessionResponse(Message):
    """Response to an end-session (logout) request."""

    c_param = {"state": SINGLE_OPTIONAL_STRING}
class Claims(Message):
    """Free-form set of individual claim requests (OIDC Core section 5.5)."""

    pass
class ClaimsRequest(Message):
    """The "claims" request parameter: claims wanted in userinfo / id_token."""

    c_param = {
        "userinfo": OPTIONAL_MULTIPLE_Claims,
        "id_token": OPTIONAL_MULTIPLE_Claims,
    }
class OpenIDRequest(AuthorizationRequest):
    """Request Object payload; same parameters as an AuthorizationRequest."""

    pass
class ProviderConfigurationResponse(Message):
    """OP metadata document (OIDC Discovery section 3)."""

    c_param = {
        "issuer": SINGLE_REQUIRED_STRING,
        "authorization_endpoint": SINGLE_REQUIRED_STRING,
        "token_endpoint": SINGLE_OPTIONAL_STRING,
        "userinfo_endpoint": SINGLE_OPTIONAL_STRING,
        "jwks_uri": SINGLE_REQUIRED_STRING,
        "registration_endpoint": SINGLE_OPTIONAL_STRING,
        "scopes_supported": OPTIONAL_LIST_OF_STRINGS,
        "response_types_supported": REQUIRED_LIST_OF_STRINGS,
        "response_modes_supported": OPTIONAL_LIST_OF_STRINGS,
        "grant_types_supported": OPTIONAL_LIST_OF_STRINGS,
        "acr_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "subject_types_supported": REQUIRED_LIST_OF_STRINGS,
        "id_token_signing_alg_values_supported": REQUIRED_LIST_OF_STRINGS,
        "id_token_encryption_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "id_token_encryption_enc_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "userinfo_signing_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "userinfo_encryption_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "userinfo_encryption_enc_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "request_object_signing_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "request_object_encryption_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "request_object_encryption_enc_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "token_endpoint_auth_methods_supported": OPTIONAL_LIST_OF_STRINGS,
        "token_endpoint_auth_signing_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "display_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "claim_types_supported": OPTIONAL_LIST_OF_STRINGS,
        "claims_supported": OPTIONAL_LIST_OF_STRINGS,
        "service_documentation": SINGLE_OPTIONAL_STRING,
        "claims_locales_supported": OPTIONAL_LIST_OF_STRINGS,
        "ui_locales_supported": OPTIONAL_LIST_OF_STRINGS,
        "claims_parameter_supported": SINGLE_OPTIONAL_BOOLEAN,
        "request_parameter_supported": SINGLE_OPTIONAL_BOOLEAN,
        "request_uri_parameter_supported": SINGLE_OPTIONAL_BOOLEAN,
        "require_request_uri_registration": SINGLE_OPTIONAL_BOOLEAN,
        "op_policy_uri": SINGLE_OPTIONAL_STRING,
        "op_tos_uri": SINGLE_OPTIONAL_STRING,
        "check_session_iframe": SINGLE_OPTIONAL_STRING,
        "end_session_endpoint": SINGLE_OPTIONAL_STRING,
        "frontchannel_logout_supported": SINGLE_OPTIONAL_BOOLEAN,
        "frontchannel_logout_session_supported": SINGLE_OPTIONAL_BOOLEAN,
        "backchannel_logout_supported": SINGLE_OPTIONAL_BOOLEAN,
        "backchannel_logout_session_supported": SINGLE_OPTIONAL_BOOLEAN,
    }
    c_default = {
        "version": "3.0",
        "token_endpoint_auth_methods_supported": ["client_secret_basic"],
        "claims_parameter_supported": False,
        "request_parameter_supported": False,
        "request_uri_parameter_supported": True,
        "require_request_uri_registration": False,
        "grant_types_supported": ["authorization_code", "implicit"],
        "frontchannel_logout_supported": False,
        "frontchannel_logout_session_supported": False,
        "backchannel_logout_supported": False,
        "backchannel_logout_session_supported": False,
    }

    def verify(self, **kwargs):
        """
        Verify the provider metadata.

        Requires "openid" among the supported scopes, an https issuer URL
        without query/fragment, and a token endpoint whenever a code flow
        is advertised.

        :raises SchemeError: if the issuer is not an https URL.
        :raises MissingRequiredAttribute: if token_endpoint is needed but absent.
        :raises AssertionError: on scope or issuer-format violations.
        :return: True if the metadata is valid.
        """
        super().verify(**kwargs)
        if "scopes_supported" in self:
            if "openid" not in self["scopes_supported"]:
                raise AssertionError()
            for scope in self["scopes_supported"]:
                check_char_set(scope, SCOPE_CHARSET)
        parts = urlparse(self["issuer"])
        if parts.scheme != "https":
            raise SchemeError("Not HTTPS")
        # Issuer identifiers must not carry query or fragment components.
        if parts.query or parts.fragment:
            raise AssertionError()
        if (
            any("code" in rt for rt in self["response_types_supported"])
            and "token_endpoint" not in self
        ):
            raise MissingRequiredAttribute("token_endpoint")
        return True
class AuthnToken(Message):
    """JWT used for private_key_jwt / client_secret_jwt client authentication."""

    c_param = {
        "iss": SINGLE_REQUIRED_STRING,
        "sub": SINGLE_REQUIRED_STRING,
        "aud": REQUIRED_LIST_OF_STRINGS,  # Array of strings or string
        "jti": SINGLE_REQUIRED_STRING,
        "exp": SINGLE_REQUIRED_INT,
        "iat": SINGLE_OPTIONAL_INT,
    }
# According to RFC 7519 all claims are optional
class JasonWebToken(Message):
    """Generic JWT claim set (RFC 7519).  Name typo kept for compatibility."""

    c_param = {
        "iss": SINGLE_OPTIONAL_STRING,
        "sub": SINGLE_OPTIONAL_STRING,
        "aud": OPTIONAL_LIST_OF_STRINGS,  # Array of strings or string
        "exp": SINGLE_OPTIONAL_INT,
        "nbf": SINGLE_OPTIONAL_INT,
        "iat": SINGLE_OPTIONAL_INT,
        "jti": SINGLE_OPTIONAL_STRING,
    }
def jwt_deser(val, sformat="json"):
    """Deserialize a JWT claim set into a :py:class:`JasonWebToken` message."""
    # urlencoded payloads are handled the same way as plain JSON.
    fmt = "json" if sformat == "urlencoded" else sformat
    if fmt in ("dict", "json"):
        fmt = "json"
        if not isinstance(val, str):
            # A dict was handed in directly; serialize it to JSON first.
            val = json.dumps(val)
    return JasonWebToken().deserialize(val, fmt)
SINGLE_OPTIONAL_JWT = ParamDefinition(Message, False, msg_ser, jwt_deser, False)
class UserInfoErrorResponse(message.ErrorResponse):
    """Error response from the userinfo endpoint."""

    c_allowed_values = {
        "error": [
            "invalid_schema",
            "invalid_request",
            "invalid_token",
            "insufficient_scope",
        ]
    }
class DiscoveryRequest(Message):
    """WebFinger-style issuer discovery request."""

    c_param = {"principal": SINGLE_REQUIRED_STRING, "service": SINGLE_REQUIRED_STRING}
class DiscoveryResponse(Message):
    """Issuer discovery response: list of provider locations."""

    c_param = {"locations": REQUIRED_LIST_OF_STRINGS}
class ResourceRequest(Message):
    """Request to a protected resource, optionally carrying an access token."""

    c_param = {"access_token": SINGLE_OPTIONAL_STRING}
# Mapping from standard OIDC scope values to the claims they unlock
# (OIDC Core section 5.4).
SCOPE2CLAIMS: Dict[str, List[str]] = {
    "openid": ["sub"],
    "profile": [
        "name",
        "given_name",
        "family_name",
        "middle_name",
        "nickname",
        "profile",
        "picture",
        "website",
        "gender",
        "birthdate",
        "zoneinfo",
        "locale",
        "updated_at",
        "preferred_username",
    ],
    "email": ["email", "email_verified"],
    "address": ["address"],
    "phone": ["phone_number", "phone_number_verified"],
    "offline_access": [],
}
# LOGOUT related messages

# Parameter definitions for JSON-object-valued attributes.
SINGLE_OPTIONAL_JSON = ParamDefinition(dict, False, json_ser, json_deser, False)
SINGLE_REQUIRED_JSON = ParamDefinition(dict, True, json_ser, json_deser, False)

# The mandatory member name of a Logout Token's "events" claim.
BACK_CHANNEL_LOGOUT_EVENT = "http://schemas.openid.net/event/backchannel-logout"
class LogoutToken(Message):
    """Defined in https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken ."""

    c_param = {
        "iss": SINGLE_REQUIRED_STRING,
        "sub": SINGLE_OPTIONAL_STRING,
        "aud": REQUIRED_LIST_OF_STRINGS,  # Array of strings or string
        "iat": SINGLE_REQUIRED_INT,
        "jti": SINGLE_REQUIRED_STRING,
        "events": SINGLE_REQUIRED_JSON,
        "sid": SINGLE_OPTIONAL_STRING,
    }

    def verify(self, **kwargs):
        """
        Validate the Logout Token per Back-Channel Logout section 2.6.

        :param kwargs: May carry "aud", "iss" (expected values) and "skew".
        :raises MessageException: if a nonce is present (prohibited).
        :raises NotForMe: if audience or issuer do not match.
        :raises ValueError: on malformed "events" or a future iat.
        :return: True if the token is valid.
        """
        super().verify(**kwargs)
        # A nonce must never appear; it would let a Logout Token double as
        # an ID Token.
        if "nonce" in self:
            raise MessageException(
                '"nonce" is prohibited from appearing in a LogoutToken.'
            )
        # Check the 'events' JSON
        _keys = list(self["events"].keys())
        if len(_keys) != 1:
            raise ValueError('Must only be one member in "events"')
        if _keys[0] != BACK_CHANNEL_LOGOUT_EVENT:
            raise ValueError('Wrong member in "events"')
        if self["events"][_keys[0]] != {}:
            raise ValueError('Wrong member value in "events"')
        # There must be either a 'sub' or a 'sid', and may contain both
        if not ("sub" in self or "sid" in self):
            raise ValueError('There MUST be either a "sub" or a "sid"')
        try:
            if kwargs["aud"] not in self["aud"]:
                raise NotForMe("Not among intended audience")
        except KeyError:
            pass
        try:
            if kwargs["iss"] != self["iss"]:
                raise NotForMe("Wrong issuer")
        except KeyError:
            pass
        _now = utc_time_sans_frac()
        _skew = kwargs.get("skew", 0)
        _iat = self.get("iat", 0)
        # Reject tokens issued in the future (beyond the allowed skew).
        if _iat and _iat > (_now + _skew):
            raise ValueError("Invalid issued_at time")
        return True
# Keyword arguments that may be forwarded to Message.from_jwt when
# decoding and verifying an ID/Logout Token.
ID_TOKEN_VERIFY_ARGS = [
    "keyjar",
    "verify",
    "encalg",
    "encenc",
    "sigalg",
    "issuer",
    "allow_missing_kid",
    "no_kid_issuer",
    "trusting",
    "skew",
    "nonce_storage_time",
    "client_id",
]
class BackChannelLogoutRequest(Message):
    """Defines the message used in https://openid.net/specs/openid-connect-backchannel-1_0.html ."""

    c_param = {"logout_token": SINGLE_REQUIRED_STRING}

    def verify(self, **kwargs):
        """
        Verify the back-channel logout request.

        Decodes ``logout_token`` as a JWT (``from_jwt`` checks the signature
        using the key material forwarded from *kwargs*) and then runs the
        LogoutToken claim validation.

        :param kwargs: May carry the ID_TOKEN_VERIFY_ARGS entries plus the
            arguments LogoutToken.verify understands ("aud", "iss", "skew").
        :raises NotForMe: if the token is not intended for this recipient.
        :return: True if the request is valid.
        """
        super().verify(**kwargs)
        # BUG FIX: this previously read the undefined name TOKEN_VERIFY_ARGS,
        # which raised NameError at runtime; the whitelist defined above is
        # ID_TOKEN_VERIFY_ARGS and carries the keyjar needed for the
        # signature check.
        args = {arg: kwargs[arg] for arg in ID_TOKEN_VERIFY_ARGS if arg in kwargs}
        logout_token = LogoutToken().from_jwt(str(self["logout_token"]), **args)
        logout_token.verify(**kwargs)

        self["logout_token"] = logout_token

        logger.info("Verified Logout Token: {}".format(logout_token.to_dict()))

        return True
class FrontChannelLogoutRequest(Message):
    """Defines the message used in https://openid.net/specs/openid-connect-frontchannel-1_0.html ."""

    c_param = {"iss": SINGLE_OPTIONAL_STRING, "sid": SINGLE_OPTIONAL_STRING}
# Registry of message classes by name, used by the deprecated factory().
MSG = {
    "RefreshAccessTokenRequest": RefreshAccessTokenRequest,
    "TokenErrorResponse": TokenErrorResponse,
    "AccessTokenResponse": AccessTokenResponse,
    "UserInfoRequest": UserInfoRequest,
    "AuthorizationResponse": AuthorizationResponse,
    "AuthorizationErrorResponse": AuthorizationErrorResponse,
    "AuthorizationRequest": AuthorizationRequest,
    "AccessTokenRequest": AccessTokenRequest,
    "AddressClaim": AddressClaim,
    "OpenIDSchema": OpenIDSchema,
    "RegistrationRequest": RegistrationRequest,
    "RegistrationResponse": RegistrationResponse,
    "ClientRegistrationErrorResponse": ClientRegistrationErrorResponse,
    "IdToken": IdToken,
    "RefreshSessionRequest": RefreshSessionRequest,
    "RefreshSessionResponse": RefreshSessionResponse,
    "CheckSessionRequest": CheckSessionRequest,
    "CheckIDRequest": CheckIDRequest,
    "EndSessionRequest": EndSessionRequest,
    "EndSessionResponse": EndSessionResponse,
    "Claims": Claims,
    "OpenIDRequest": OpenIDRequest,
    "ProviderConfigurationResponse": ProviderConfigurationResponse,
    "AuthnToken": AuthnToken,
    "UserInfoErrorResponse": UserInfoErrorResponse,
    "DiscoveryRequest": DiscoveryRequest,
    "DiscoveryResponse": DiscoveryResponse,
    "ResourceRequest": ResourceRequest,
    # LOGOUT messages
    "LogoutToken": LogoutToken,
    "BackChannelLogoutRequest": BackChannelLogoutRequest,
    "FrontChannelLogoutRequest": FrontChannelLogoutRequest,
}
def factory(msgtype):
    """Return the Message subclass named *msgtype*, or an OAuth2 fallback."""
    warnings.warn(
        "`factory` is deprecated. Use `OIDCMessageFactory` instead.", DeprecationWarning
    )
    current_module = sys.modules[__name__]
    for _, member in inspect.getmembers(current_module):
        # Only Message subclasses defined/imported in this module qualify.
        if not (inspect.isclass(member) and issubclass(member, Message)):
            continue
        try:
            name = member.__name__
        except AttributeError:
            continue
        if name == msgtype:
            return member

    # Fall back to basic OAuth2 messages
    return message.factory(msgtype)
class OIDCMessageFactory(MessageFactory):
    """Factory that knows OIDC message types."""

    # (request class, response class) pairs per endpoint.
    authorization_endpoint = MessageTuple(AuthorizationRequest, AuthorizationResponse)
    token_endpoint = MessageTuple(AccessTokenRequest, AccessTokenResponse)
    refresh_endpoint = MessageTuple(RefreshAccessTokenRequest, AccessTokenResponse)
    resource_endpoint = MessageTuple(ResourceRequest, Message)
    configuration_endpoint = MessageTuple(Message, ProviderConfigurationResponse)
    userinfo_endpoint = MessageTuple(UserInfoRequest, Message)
    registration_endpoint = MessageTuple(RegistrationRequest, RegistrationResponse)
    endsession_endpoint = MessageTuple(EndSessionRequest, EndSessionResponse)
    checkid_endpoint = MessageTuple(CheckIDRequest, IdToken)
    checksession_endpoint = MessageTuple(CheckSessionRequest, IdToken)
    refreshsession_endpoint = MessageTuple(
        RefreshSessionRequest, RefreshSessionResponse
    )
    discovery_endpoint = MessageTuple(DiscoveryRequest, DiscoveryResponse)
crossvul-python_data_good_4373_1 | # -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2019 Matrix.org Federation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
List,
Optional,
Tuple,
Union,
)
from prometheus_client import Counter, Gauge, Histogram
from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
FederationError,
IncompatibleRoomVersionError,
NotFoundError,
SynapseError,
UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.endpoint import parse_server_name
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
make_deferred_yieldable,
nested_logging_context,
run_in_background,
)
from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
ReplicationGetQueryRestServlet,
)
from synapse.types import JsonDict, get_domain_from_id
from synapse.util import glob_to_regex, json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
if TYPE_CHECKING:
from synapse.server import HomeServer
# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.
TRANSACTION_CONCURRENCY_LIMIT = 10

logger = logging.getLogger(__name__)

# Prometheus metrics for inbound federation traffic.
received_pdus_counter = Counter("synapse_federation_server_received_pdus", "")

received_edus_counter = Counter("synapse_federation_server_received_edus", "")

received_queries_counter = Counter(
    "synapse_federation_server_received_queries", "", ["type"]
)

pdu_process_time = Histogram(
    "synapse_federation_server_pdu_process_time", "Time taken to process an event",
)

# Only updated for domains listed in federation_metrics_domains.
last_pdu_age_metric = Gauge(
    "synapse_federation_last_received_pdu_age",
    "The age (in seconds) of the last PDU successfully received from the given domain",
    labelnames=("server_name",),
)
class FederationServer(FederationBase):
    def __init__(self, hs):
        """
        Set up handlers, linearizers and response caches for inbound federation.

        Args:
            hs: the HomeServer instance, used to obtain handlers and config.
        """
        super().__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_federation_handler()
        self.state = hs.get_state_handler()

        self.device_handler = hs.get_device_handler()

        # Ensure the following handlers are loaded since they register callbacks
        # with FederationHandlerRegistry.
        hs.get_directory_handler()

        self._federation_ratelimiter = hs.get_federation_ratelimiter()

        self._server_linearizer = Linearizer("fed_server")
        self._transaction_linearizer = Linearizer("fed_txn_handler")

        # We cache results for transaction with the same ID
        self._transaction_resp_cache = ResponseCache(
            hs, "fed_txn_handler", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        self.transaction_actions = TransactionActions(self.store)

        self.registry = hs.get_federation_registry()

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(
            hs, "state_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]
        self._state_ids_resp_cache = ResponseCache(
            hs, "state_ids_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        # Domains for which we report per-domain federation metrics.
        self._federation_metrics_domains = (
            hs.get_config().federation.federation_metrics_domains
        )
    async def on_backfill_request(
        self, origin: str, room_id: str, versions: List[str], limit: int
    ) -> Tuple[int, Dict[str, Any]]:
        """Handle a federation /backfill request: return events before `versions`.

        The origin is checked against the room's server ACL before any
        events are returned.
        """
        with (await self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            await self.check_server_matches_acl(origin_host, room_id)

            pdus = await self.handler.on_backfill_request(
                origin, room_id, versions, limit
            )

            res = self._transaction_from_pdus(pdus).get_dict()

        return 200, res
    async def on_incoming_transaction(
        self, origin: str, transaction_data: JsonDict
    ) -> Tuple[int, Dict[str, Any]]:
        """Entry point for a received federation transaction.

        De-duplicates retried transactions via a response cache keyed on
        (origin, transaction_id), then hands off to the inner handler.
        """
        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.
        request_time = self._clock.time_msec()

        transaction = Transaction(**transaction_data)
        transaction_id = transaction.transaction_id  # type: ignore

        if not transaction_id:
            raise Exception("Transaction missing transaction_id")

        logger.debug("[%s] Got transaction", transaction_id)

        # We wrap in a ResponseCache so that we de-duplicate retried
        # transactions.
        return await self._transaction_resp_cache.wrap(
            (origin, transaction_id),
            self._on_incoming_transaction_inner,
            origin,
            transaction,
            request_time,
        )
    async def _on_incoming_transaction_inner(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        """Serialize and rate-limit transaction processing per origin server."""
        # Use a linearizer to ensure that transactions from a remote are
        # processed in order.
        with await self._transaction_linearizer.queue(origin):
            # We rate limit here *after* we've queued up the incoming requests,
            # so that we don't fill up the ratelimiter with blocked requests.
            #
            # This is important as the ratelimiter allows N concurrent requests
            # at a time, and only starts ratelimiting if there are more requests
            # than that being processed at a time. If we queued up requests in
            # the linearizer/response cache *after* the ratelimiting then those
            # queued up requests would count as part of the allowed limit of N
            # concurrent requests.
            with self._federation_ratelimiter.ratelimit(origin) as d:
                await d

                result = await self._handle_incoming_transaction(
                    origin, transaction, request_time
                )

        return result
    async def _handle_incoming_transaction(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        """ Process an incoming transaction and return the HTTP response

        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at

        Returns:
            HTTP response code and body
        """
        # If we've already stored a response for this transaction, replay it.
        response = await self.transaction_actions.have_responded(origin, transaction)

        if response:
            logger.debug(
                "[%s] We've already responded to this request",
                transaction.transaction_id,  # type: ignore
            )
            return response

        logger.debug("[%s] Transaction is new", transaction.transaction_id)  # type: ignore

        # Reject if PDU count > 50 or EDU count > 100
        if len(transaction.pdus) > 50 or (  # type: ignore
            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
        ):
            logger.info("Transaction PDU or EDU count too large. Returning 400")

            response = {}
            await self.transaction_actions.set_response(
                origin, transaction, 400, response
            )
            return 400, response

        # We process PDUs and EDUs in parallel. This is important as we don't
        # want to block things like to device messages from reaching clients
        # behind the potentially expensive handling of PDUs.
        pdu_results, _ = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self._handle_pdus_in_txn, origin, transaction, request_time
                    ),
                    run_in_background(self._handle_edus_in_txn, origin, transaction),
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        response = {"pdus": pdu_results}

        logger.debug("Returning: %s", str(response))

        # Persist the response so retries of this transaction can be replayed.
        await self.transaction_actions.set_response(origin, transaction, 200, response)
        return 200, response
    async def _handle_pdus_in_txn(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Dict[str, dict]:
        """Process the PDUs in a received transaction.

        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at

        Returns:
            A map from event ID of a processed PDU to any errors we should
            report back to the sending server.
        """

        received_pdus_counter.inc(len(transaction.pdus))  # type: ignore

        origin_host, _ = parse_server_name(origin)

        # PDUs grouped by room so that rooms can be processed in parallel.
        pdus_by_room = {}  # type: Dict[str, List[EventBase]]

        newest_pdu_ts = 0

        for p in transaction.pdus:  # type: ignore
            # FIXME (richardv): I don't think this works:
            #  https://github.com/matrix-org/synapse/issues/8429
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                # Convert relative age into an absolute origin timestamp.
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            # We try and pull out an event ID so that if later checks fail we
            # can log something sensible. We don't mandate an event ID here in
            # case future event formats get rid of the key.
            possible_event_id = p.get("event_id", "<Unknown>")

            # Now we get the room ID so that we can check that we know the
            # version of the room.
            room_id = p.get("room_id")
            if not room_id:
                logger.info(
                    "Ignoring PDU as does not have a room_id. Event ID: %s",
                    possible_event_id,
                )
                continue

            try:
                room_version = await self.store.get_room_version(room_id)
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue
            except UnsupportedRoomVersionError as e:
                # this can happen if support for a given room version is withdrawn,
                # so that we still get events for said room.
                logger.info("Ignoring PDU: %s", e)
                continue

            event = event_from_pdu_json(p, room_version)
            pdus_by_room.setdefault(room_id, []).append(event)

            if event.origin_server_ts > newest_pdu_ts:
                newest_pdu_ts = event.origin_server_ts

        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.

        async def process_pdus_for_room(room_id: str):
            # Process all PDUs for a single room sequentially, recording
            # per-event results/errors in pdu_results.
            logger.debug("Processing PDUs for %s", room_id)
            try:
                await self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                logger.warning("Ignoring PDUs for room %s from banned server", room_id)
                for pdu in pdus_by_room[room_id]:
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                with pdu_process_time.time():
                    with nested_logging_context(event_id):
                        try:
                            await self._handle_received_pdu(origin, pdu)
                            pdu_results[event_id] = {}
                        except FederationError as e:
                            logger.warning("Error handling PDU %s: %s", event_id, e)
                            pdu_results[event_id] = {"error": str(e)}
                        except Exception as e:
                            f = failure.Failure()
                            pdu_results[event_id] = {"error": str(e)}
                            logger.error(
                                "Failed to handle PDU %s",
                                event_id,
                                exc_info=(f.type, f.value, f.getTracebackObject()),
                            )

        await concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
        )

        # Record the per-domain "last received PDU age" metric if enabled.
        if newest_pdu_ts and origin in self._federation_metrics_domains:
            newest_pdu_age = self._clock.time_msec() - newest_pdu_ts
            last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000)

        return pdu_results
    async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
        """Process the EDUs in a received transaction.
        """

        async def _process_edu(edu_dict):
            # Dispatch a single EDU to whichever handler registered for its type.
            received_edus_counter.inc()

            edu = Edu(
                origin=origin,
                destination=self.server_name,
                edu_type=edu_dict["edu_type"],
                content=edu_dict["content"],
            )
            await self.registry.on_edu(edu.edu_type, origin, edu.content)

        await concurrently_execute(
            _process_edu,
            getattr(transaction, "edus", []),
            TRANSACTION_CONCURRENCY_LIMIT,
        )
    async def on_room_state_request(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        """Handle a federation /state request: full state PDUs at `event_id`.

        Raises:
            AuthError: if the requesting server is not in the room or is
                blocked by the room's server ACL.
        """
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        # we grab the linearizer to protect ourselves from servers which hammer
        # us. In theory we might already have the response to this query
        # in the cache so we could return it without waiting for the linearizer
        # - but that's non-trivial to get right, and anyway somewhat defeats
        # the point of the linearizer.
        with (await self._server_linearizer.queue((origin, room_id))):
            resp = dict(
                await self._state_resp_cache.wrap(
                    (room_id, event_id),
                    self._on_context_state_request_compute,
                    room_id,
                    event_id,
                )
            )

        room_version = await self.store.get_room_version_id(room_id)
        resp["room_version"] = room_version

        return 200, resp
async def on_state_ids_request(
    self, origin: str, room_id: str, event_id: str
) -> Tuple[int, Dict[str, Any]]:
    """Handle a /state_ids request: state and auth-chain event IDs at an event."""
    if not event_id:
        raise NotImplementedError("Specify an event")

    acl_host, _ = parse_server_name(origin)
    await self.check_server_matches_acl(acl_host, room_id)

    if not await self.auth.check_host_in_room(room_id, origin):
        raise AuthError(403, "Host not in room.")

    # The computation is cached per (room, event) to absorb repeated requests.
    result = await self._state_ids_resp_cache.wrap(
        (room_id, event_id), self._on_state_ids_request_compute, room_id, event_id,
    )

    return 200, result
async def _on_state_ids_request_compute(self, room_id, event_id):
    """Build the /state_ids response body for (room_id, event_id)."""
    state_event_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
    auth_ids = await self.store.get_auth_chain_ids(state_event_ids)
    return {"pdu_ids": state_event_ids, "auth_chain_ids": auth_ids}
async def _on_context_state_request_compute(
    self, room_id: str, event_id: str
) -> Dict[str, list]:
    """Build the /state response: full state PDUs plus their auth chain.

    If event_id is falsy, the room's current state is used instead of the
    state at a particular event.
    """
    if event_id:
        state_events = await self.handler.get_state_for_pdu(room_id, event_id)
    else:
        state_events = (await self.state.get_current_state(room_id)).values()

    auth_events = await self.store.get_auth_chain(
        [ev.event_id for ev in state_events]
    )

    return {
        "pdus": [ev.get_pdu_json() for ev in state_events],
        "auth_chain": [ev.get_pdu_json() for ev in auth_events],
    }
async def on_pdu_request(
    self, origin: str, event_id: str
) -> Tuple[int, Union[JsonDict, str]]:
    """Return a single persisted PDU wrapped in a transaction, or a 404."""
    pdu = await self.handler.get_persisted_pdu(origin, event_id)

    if not pdu:
        return 404, ""

    return 200, self._transaction_from_pdus([pdu]).get_dict()
async def on_query_request(
    self, query_type: str, args: Dict[str, str]
) -> Tuple[int, Dict[str, Any]]:
    """Dispatch an incoming federation query to the handler registry."""
    received_queries_counter.labels(query_type).inc()
    return 200, await self.registry.on_query(query_type, args)
async def on_make_join_request(
    self, origin: str, room_id: str, user_id: str, supported_versions: List[str]
) -> Dict[str, Any]:
    """Produce a join event template for a remote user.

    Enforces the room's server ACL first, then refuses if the requester does
    not support the room's version.
    """
    acl_host, _ = parse_server_name(origin)
    await self.check_server_matches_acl(acl_host, room_id)

    room_version = await self.store.get_room_version_id(room_id)
    if room_version not in supported_versions:
        logger.warning(
            "Room version %s not in %s", room_version, supported_versions
        )
        raise IncompatibleRoomVersionError(room_version=room_version)

    pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
    return {
        "event": pdu.get_pdu_json(self._clock.time_msec()),
        "room_version": room_version,
    }
async def on_invite_request(
    self, origin: str, content: JsonDict, room_version_id: str
) -> Dict[str, Any]:
    """Handle an incoming /invite: verify the event and pass it to the handler."""
    room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
    if not room_version:
        raise SynapseError(
            400,
            "Homeserver does not support this room version",
            Codes.UNSUPPORTED_ROOM_VERSION,
        )

    pdu = event_from_pdu_json(content, room_version)

    acl_host, _ = parse_server_name(origin)
    await self.check_server_matches_acl(acl_host, pdu.room_id)

    # Verify signatures/hashes before doing anything with the event.
    pdu = await self._check_sigs_and_hash(room_version, pdu)

    ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
    return {"event": ret_pdu.get_pdu_json(self._clock.time_msec())}
async def on_send_join_request(
    self, origin: str, content: JsonDict
) -> Dict[str, Any]:
    """Handle /send_join: verify the join event, then return room state and
    auth chain for the joining server."""
    logger.debug("on_send_join_request: content: %s", content)

    assert_params_in_dict(content, ["room_id"])
    room_version = await self.store.get_room_version(content["room_id"])
    pdu = event_from_pdu_json(content, room_version)

    acl_host, _ = parse_server_name(origin)
    await self.check_server_matches_acl(acl_host, pdu.room_id)

    logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)

    pdu = await self._check_sigs_and_hash(room_version, pdu)

    res_pdus = await self.handler.on_send_join_request(origin, pdu)

    now = self._clock.time_msec()
    return {
        "state": [p.get_pdu_json(now) for p in res_pdus["state"]],
        "auth_chain": [p.get_pdu_json(now) for p in res_pdus["auth_chain"]],
    }
async def on_make_leave_request(
    self, origin: str, room_id: str, user_id: str
) -> Dict[str, Any]:
    """Produce a leave event template for a remote user, after ACL checks."""
    acl_host, _ = parse_server_name(origin)
    await self.check_server_matches_acl(acl_host, room_id)

    pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)
    room_version = await self.store.get_room_version_id(room_id)

    return {
        "event": pdu.get_pdu_json(self._clock.time_msec()),
        "room_version": room_version,
    }
async def on_send_leave_request(self, origin: str, content: JsonDict) -> dict:
    """Handle /send_leave: verify the leave event and hand it to the handler."""
    logger.debug("on_send_leave_request: content: %s", content)

    assert_params_in_dict(content, ["room_id"])
    room_version = await self.store.get_room_version(content["room_id"])
    pdu = event_from_pdu_json(content, room_version)

    acl_host, _ = parse_server_name(origin)
    await self.check_server_matches_acl(acl_host, pdu.room_id)

    logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)

    pdu = await self._check_sigs_and_hash(room_version, pdu)

    await self.handler.on_send_leave_request(origin, pdu)

    # The response body for /send_leave is an empty object.
    return {}
async def on_event_auth(
    self, origin: str, room_id: str, event_id: str
) -> Tuple[int, Dict[str, Any]]:
    """Return the auth chain for an event, serialised per (origin, room)."""
    with (await self._server_linearizer.queue((origin, room_id))):
        acl_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(acl_host, room_id)

        now = self._clock.time_msec()
        auth_pdus = await self.handler.on_event_auth(event_id)
        payload = {"auth_chain": [a.get_pdu_json(now) for a in auth_pdus]}

    return 200, payload
@log_function
async def on_query_client_keys(
    self, origin: str, content: Dict[str, str]
) -> Tuple[int, Dict[str, Any]]:
    """Answer a client-keys query by delegating to the generic query path."""
    return await self.on_query_request("client_keys", content)
async def on_query_user_devices(
    self, origin: str, user_id: str
) -> Tuple[int, Dict[str, Any]]:
    """Return the device list for a local user to a querying remote server."""
    devices = await self.device_handler.on_federation_query_user_devices(user_id)
    return 200, devices
@trace
async def on_claim_client_keys(
    self, origin: str, content: JsonDict
) -> Dict[str, Any]:
    """Claim one-time E2E keys on behalf of a remote server."""
    # Flatten the request into (user, device, algorithm) triples.
    claims = [
        (user_id, device_id, algorithm)
        for user_id, device_keys in content.get("one_time_keys", {}).items()
        for device_id, algorithm in device_keys.items()
    ]

    log_kv({"message": "Claiming one time keys.", "user, device pairs": claims})
    results = await self.store.claim_e2e_one_time_keys(claims)

    json_result = {}  # type: Dict[str, Dict[str, dict]]
    for user_id, device_keys in results.items():
        for device_id, keys in device_keys.items():
            for key_id, json_str in keys.items():
                json_result.setdefault(user_id, {})[device_id] = {
                    key_id: json_decoder.decode(json_str)
                }

    logger.info(
        "Claimed one-time-keys: %s",
        ",".join(
            "%s for %s:%s" % (key_id, user_id, device_id)
            for user_id, user_keys in json_result.items()
            for device_id, device_keys in user_keys.items()
            for key_id in device_keys
        ),
    )

    return {"one_time_keys": json_result}
async def on_get_missing_events(
    self,
    origin: str,
    room_id: str,
    earliest_events: List[str],
    latest_events: List[str],
    limit: int,
) -> Dict[str, list]:
    """Return up to `limit` events between earliest_events and latest_events."""
    with (await self._server_linearizer.queue((origin, room_id))):
        acl_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(acl_host, room_id)

        logger.debug(
            "on_get_missing_events: earliest_events: %r, latest_events: %r,"
            " limit: %d",
            earliest_events,
            latest_events,
            limit,
        )

        missing_events = await self.handler.on_get_missing_events(
            origin, room_id, earliest_events, latest_events, limit
        )

        # Keep log volume sane: only dump full event bodies for small results.
        if len(missing_events) < 5:
            logger.debug(
                "Returning %d events: %r", len(missing_events), missing_events
            )
        else:
            logger.debug("Returning %d events", len(missing_events))

        now = self._clock.time_msec()

    return {"events": [ev.get_pdu_json(now) for ev in missing_events]}
@log_function
async def on_openid_userinfo(self, token: str) -> Optional[str]:
    """Resolve an OpenID access token to a user ID, if valid and unexpired."""
    return await self.store.get_user_id_for_open_id_token(
        token, self._clock.time_msec()
    )
def _transaction_from_pdus(self, pdu_list: List[EventBase]) -> Transaction:
    """Wrap the given PDUs in a new Transaction ready for transmission."""
    now = self._clock.time_msec()
    return Transaction(
        origin=self.server_name,
        pdus=[p.get_pdu_json(now) for p in pdu_list],
        origin_server_ts=int(now),
        destination=None,
    )
async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None:
    """ Process a PDU received in a federation /send/ transaction.

    If the event is invalid, then this method throws a FederationError.
    (The error will then be logged and sent back to the sender (which
    probably won't do anything with it), and other events in the
    transaction will be processed as normal).

    It is likely that we'll then receive other events which refer to
    this rejected_event in their prev_events, etc. When that happens,
    we'll attempt to fetch the rejected event again, which will presumably
    fail, so those second-generation events will also get rejected.

    Eventually, we get to the point where there are more than 10 events
    between any new events and the original rejected event. Since we
    only try to backfill 10 events deep on received pdu, we then accept the
    new event, possibly introducing a discontinuity in the DAG, with new
    forward extremities, so normal service is approximately returned,
    until we try to backfill across the discontinuity.

    Args:
        origin: server which sent the pdu
        pdu: received pdu

    Raises: FederationError if the signatures / hash do not match, or
        if the event was unacceptable for any other reason (eg, too large,
        too many prev_events, couldn't find the prev_events)
    """
    # check that it's actually being sent from a valid destination to
    # workaround bug #1753 in 0.18.5 and 0.18.6
    if origin != get_domain_from_id(pdu.sender):
        # We continue to accept join events from any server; this is
        # necessary for the federation join dance to work correctly.
        # (When we join over federation, the "helper" server is
        # responsible for sending out the join event, rather than the
        # origin. See bug #1893. This is also true for some third party
        # invites).
        if not (
            pdu.type == "m.room.member"
            and pdu.content
            and pdu.content.get("membership", None)
            in (Membership.JOIN, Membership.INVITE)
        ):
            logger.info(
                "Discarding PDU %s from invalid origin %s", pdu.event_id, origin
            )
            return
        else:
            logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)

    # We've already checked that we know the room version by this point
    room_version = await self.store.get_room_version(pdu.room_id)

    # Check signature.
    try:
        pdu = await self._check_sigs_and_hash(room_version, pdu)
    except SynapseError as e:
        # Surface signature/hash failures as a FederationError so the sender
        # gets a per-PDU error in the transaction response.
        raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)

    await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
def __str__(self):
return "<ReplicationLayer(%s)>" % self.server_name
async def exchange_third_party_invite(
    self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict
):
    """Turn a signed 3PID invite into a real invite via the federation handler."""
    return await self.handler.exchange_third_party_invite(
        sender_user_id, target_user_id, room_id, signed
    )
async def on_exchange_third_party_invite_request(self, event_dict: Dict):
    """Handle a remote server's request to exchange a 3PID invite."""
    return await self.handler.on_exchange_third_party_invite_request(event_dict)
async def check_server_matches_acl(self, server_name: str, room_id: str):
    """Check if the given server is allowed by the server ACLs in the room.

    Args:
        server_name: name of server, *without any port part*
        room_id: ID of the room to check

    Raises:
        AuthError if the server does not match the ACL
    """
    state_ids = await self.store.get_current_state_ids(room_id)
    acl_event_id = state_ids.get((EventTypes.ServerACL, ""))

    if not acl_event_id:
        # No ACL event in the room: every server is allowed.
        return

    acl_event = await self.store.get_event(acl_event_id)
    if not server_matches_acl_event(server_name, acl_event):
        raise AuthError(code=403, msg="Server is banned from room")
def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool:
    """Check if the given server is allowed by the ACL event

    Args:
        server_name: name of server, without any port part
        acl_event: m.room.server_acl event

    Returns:
        True if this server is allowed by the ACLs
    """
    logger.debug("Checking %s against acl %s", server_name, acl_event.content)

    # first of all, check if literal IPs are blocked, and if so, whether the
    # server name is a literal IP
    allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
    if not isinstance(allow_ip_literals, bool):
        logger.warning("Ignoring non-bool allow_ip_literals flag")
        allow_ip_literals = True
    if not allow_ip_literals:
        # check for ipv6 literals. These start with '['.
        # (use startswith rather than server_name[0] so that an empty
        # server_name cannot raise IndexError; it will instead fall through
        # to the allow/deny lists and be rejected by the default below)
        if server_name.startswith("["):
            return False

        # check for ipv4 literals. We can just lift the routine from twisted.
        if isIPAddress(server_name):
            return False

    # next, check the deny list
    deny = acl_event.content.get("deny", [])
    if not isinstance(deny, (list, tuple)):
        logger.warning("Ignoring non-list deny ACL %s", deny)
        deny = []
    for e in deny:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched deny rule %s", server_name, e)
            return False

    # then the allow list.
    allow = acl_event.content.get("allow", [])
    if not isinstance(allow, (list, tuple)):
        logger.warning("Ignoring non-list allow ACL %s", allow)
        allow = []
    for e in allow:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched allow rule %s", server_name, e)
            return True

    # everything else should be rejected.
    # logger.info("%s fell through", server_name)
    return False
def _acl_entry_matches(server_name: str, acl_entry: Any) -> bool:
    """Return True if server_name matches the glob-style ACL entry.

    Non-string entries are ignored (logged and treated as no-match).
    """
    if not isinstance(acl_entry, str):
        logger.warning(
            "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
        )
        return False

    pattern = glob_to_regex(acl_entry)
    return bool(pattern.match(server_name))
class FederationHandlerRegistry:
    """Allows classes to register themselves as handlers for a given EDU or
    query type for incoming federation traffic.
    """

    def __init__(self, hs: "HomeServer"):
        self.config = hs.config
        self.http_client = hs.get_simple_http_client()
        self.clock = hs.get_clock()
        self._instance_name = hs.get_instance_name()

        # These are safe to load in monolith mode, but will explode if we try
        # and use them. However we have guards before we use them to ensure that
        # we don't route to ourselves, and in monolith mode that will always be
        # the case.
        self._get_query_client = ReplicationGetQueryRestServlet.make_client(hs)
        self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs)

        # Registered handlers for incoming EDUs, keyed by EDU type.
        self.edu_handlers = (
            {}
        )  # type: Dict[str, Callable[[str, dict], Awaitable[None]]]
        # Registered handlers for incoming federation queries, keyed by type.
        self.query_handlers = {}  # type: Dict[str, Callable[[dict], Awaitable[None]]]

        # Map from type to instance name that we should route EDU handling to.
        self._edu_type_to_instance = {}  # type: Dict[str, str]

    def register_edu_handler(
        self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation EDU of the given type.

        Args:
            edu_type: The type of the incoming EDU to register handler for
            handler: A callable invoked on incoming EDU
                of the given type. The arguments are the origin server name and
                the EDU contents.

        Raises:
            KeyError: if a handler for this EDU type is already registered.
        """
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))

        logger.info("Registering federation EDU handler for %r", edu_type)

        self.edu_handlers[edu_type] = handler

    def register_query_handler(
        self, query_type: str, handler: Callable[[dict], defer.Deferred]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.

        Args:
            query_type: Category name of the query, which should match
                the string used by make_query.
            handler: Invoked to handle
                incoming queries of this type. The return will be yielded
                on and the result used as the response to the query request.

        Raises:
            KeyError: if a handler for this query type is already registered.
        """
        if query_type in self.query_handlers:
            raise KeyError("Already have a Query handler for %s" % (query_type,))

        logger.info("Registering federation query handler for %r", query_type)

        self.query_handlers[query_type] = handler

    def register_instance_for_edu(self, edu_type: str, instance_name: str):
        """Register that the EDU handler is on a different instance than master.
        """
        self._edu_type_to_instance[edu_type] = instance_name

    async def on_edu(self, edu_type: str, origin: str, content: dict):
        """Dispatch an incoming EDU: handle locally if we have a handler,
        otherwise forward to the worker instance registered for this type.
        """
        # Presence EDUs are dropped entirely when presence is disabled.
        if not self.config.use_presence and edu_type == "m.presence":
            return

        # Check if we have a handler on this instance
        handler = self.edu_handlers.get(edu_type)
        if handler:
            with start_active_span_from_edu(content, "handle_edu"):
                try:
                    await handler(origin, content)
                except SynapseError as e:
                    logger.info("Failed to handle edu %r: %r", edu_type, e)
                except Exception:
                    logger.exception("Failed to handle edu %r", edu_type)
            return

        # Check if we can route it somewhere else that isn't us
        route_to = self._edu_type_to_instance.get(edu_type, "master")
        if route_to != self._instance_name:
            try:
                await self._send_edu(
                    instance_name=route_to,
                    edu_type=edu_type,
                    origin=origin,
                    content=content,
                )
            except SynapseError as e:
                logger.info("Failed to handle edu %r: %r", edu_type, e)
            except Exception:
                logger.exception("Failed to handle edu %r", edu_type)
            return

        # Oh well, let's just log and move on.
        logger.warning("No handler registered for EDU type %s", edu_type)

    async def on_query(self, query_type: str, args: dict):
        """Dispatch an incoming federation query: handle locally if possible,
        otherwise (on workers) proxy to the master instance.
        """
        handler = self.query_handlers.get(query_type)
        if handler:
            return await handler(args)

        # Check if we can route it somewhere else that isn't us
        if self._instance_name == "master":
            return await self._get_query_client(query_type=query_type, args=args)

        # Uh oh, no handler! Let's raise an exception so the request returns an
        # error.
        logger.warning("No handler registered for query type %s", query_type)
        raise NotFoundError("No handler for Query type '%s'" % (query_type,))
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_4373_1 |
crossvul-python_data_bad_1890_2 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-400/py/bad_1890_2 |
crossvul-python_data_bad_4373_3 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains handlers for federation events."""
import itertools
import logging
from collections.abc import Container
from http import HTTPStatus
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import attr
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse import event_auth
from synapse.api.constants import (
EventTypes,
Membership,
RejectedReason,
RoomEncryptionAlgorithms,
)
from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
FederationDeniedError,
FederationError,
HttpResponseException,
NotFoundError,
RequestSendFailed,
SynapseError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
from synapse.crypto.event_signing import compute_event_signature
from synapse.event_auth import auth_types_for_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
from synapse.handlers._base import BaseHandler
from synapse.logging.context import (
make_deferred_yieldable,
nested_logging_context,
preserve_fn,
run_in_background,
)
from synapse.logging.utils import log_function
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.replication.http.federation import (
ReplicationCleanRoomRestServlet,
ReplicationFederationSendEventsRestServlet,
ReplicationStoreRoomOnInviteRestServlet,
)
from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
JsonDict,
MutableStateMap,
PersistedEventPosition,
RoomStreamToken,
StateMap,
UserID,
get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr
from synapse.visibility import filter_events_for_server
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@attr.s(slots=True)
class _NewEventInfo:
    """Holds information about a received event, ready for passing to _handle_new_events

    Attributes:
        event: the received event

        state: the state at that event

        auth_events: the auth_event map for that event
    """

    # The received event itself.
    event = attr.ib(type=EventBase)
    # The room state at that event, if known (None until resolved).
    state = attr.ib(type=Optional[Sequence[EventBase]], default=None)
    # Map from (event type, state key) to the auth events for this event.
    auth_events = attr.ib(type=Optional[MutableStateMap[EventBase]], default=None)
class FederationHandler(BaseHandler):
"""Handles events that originated from federation.
Responsible for:
a) handling received Pdus before handing them on as Events to the rest
of the homeserver (including auth and state conflict resolutions)
b) converting events that were produced by local clients that may need
to be sent to remote homeservers.
c) doing the necessary dances to invite remote users and join remote
rooms.
"""
def __init__(self, hs: "HomeServer"):
    """Wire up the handler's dependencies from the HomeServer object.

    Note: some attributes are replication clients on worker processes and
    direct method references in monolith mode (see the worker_app branch).
    """
    super().__init__(hs)

    self.hs = hs

    self.store = hs.get_datastore()
    self.storage = hs.get_storage()
    self.state_store = self.storage.state
    self.federation_client = hs.get_federation_client()
    self.state_handler = hs.get_state_handler()
    self._state_resolution_handler = hs.get_state_resolution_handler()
    self.server_name = hs.hostname
    self.keyring = hs.get_keyring()
    self.action_generator = hs.get_action_generator()
    self.is_mine_id = hs.is_mine_id
    self.spam_checker = hs.get_spam_checker()
    self.event_creation_handler = hs.get_event_creation_handler()
    self._message_handler = hs.get_message_handler()
    self._server_notices_mxid = hs.config.server_notices_mxid
    self.config = hs.config
    self.http_client = hs.get_simple_http_client()
    self._instance_name = hs.get_instance_name()
    self._replication = hs.get_replication_data_handler()

    self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs)
    self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
        hs
    )

    if hs.config.worker_app:
        # Worker process: these operations must go via replication HTTP
        # calls to the relevant instance.
        self._user_device_resync = ReplicationUserDevicesResyncRestServlet.make_client(
            hs
        )
        self._maybe_store_room_on_invite = ReplicationStoreRoomOnInviteRestServlet.make_client(
            hs
        )
    else:
        # Monolith / master: call the underlying implementations directly.
        self._device_list_updater = hs.get_device_handler().device_list_updater
        self._maybe_store_room_on_invite = self.store.maybe_store_room_on_invite

    # When joining a room we need to queue any events for that room up.
    # For each room, a list of (pdu, origin) tuples.
    self.room_queues = {}  # type: Dict[str, List[Tuple[EventBase, str]]]
    self._room_pdu_linearizer = Linearizer("fed_room_pdu")

    self.third_party_event_rules = hs.get_third_party_event_rules()

    self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None:
    """ Process a PDU received via a federation /send/ transaction, or
    via backfill of missing prev_events

    Args:
        origin (str): server which initiated the /send/ transaction. Will
            be used to fetch missing events or state.
        pdu (FrozenEvent): received PDU
        sent_to_us_directly (bool): True if this event was pushed to us; False if
            we pulled it as the result of a missing prev_event.
    """

    room_id = pdu.room_id
    event_id = pdu.event_id

    logger.info("handling received PDU: %s", pdu)

    # We reprocess pdus when we have seen them only as outliers
    existing = await self.store.get_event(
        event_id, allow_none=True, allow_rejected=True
    )

    # FIXME: Currently we fetch an event again when we already have it
    # if it has been marked as an outlier.
    already_seen = existing and (
        not existing.internal_metadata.is_outlier()
        or pdu.internal_metadata.is_outlier()
    )
    if already_seen:
        logger.debug("[%s %s]: Already seen pdu", room_id, event_id)
        return

    # do some initial sanity-checking of the event. In particular, make
    # sure it doesn't have hundreds of prev_events or auth_events, which
    # could cause a huge state resolution or cascade of event fetches.
    try:
        self._sanity_check_event(pdu)
    except SynapseError as err:
        logger.warning(
            "[%s %s] Received event failed sanity checks", room_id, event_id
        )
        raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)

    # If we are currently in the process of joining this room, then we
    # queue up events for later processing.
    if room_id in self.room_queues:
        logger.info(
            "[%s %s] Queuing PDU from %s for now: join in progress",
            room_id,
            event_id,
            origin,
        )
        self.room_queues[room_id].append((pdu, origin))
        return

    # If we're not in the room just ditch the event entirely. This is
    # probably an old server that has come back and thinks we're still in
    # the room (or we've been rejoined to the room by a state reset).
    #
    # Note that if we were never in the room then we would have already
    # dropped the event, since we wouldn't know the room version.
    is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
    if not is_in_room:
        logger.info(
            "[%s %s] Ignoring PDU from %s as we're not in the room",
            room_id,
            event_id,
            origin,
        )
        return None

    state = None

    # Get missing pdus if necessary.
    if not pdu.internal_metadata.is_outlier():
        # We only backfill backwards to the min depth.
        min_depth = await self.get_min_depth_for_context(pdu.room_id)

        logger.debug("[%s %s] min_depth: %d", room_id, event_id, min_depth)

        prevs = set(pdu.prev_event_ids())
        seen = await self.store.have_events_in_timeline(prevs)

        if min_depth is not None and pdu.depth < min_depth:
            # This is so that we don't notify the user about this
            # message, to work around the fact that some events will
            # reference really really old events we really don't want to
            # send to the clients.
            pdu.internal_metadata.outlier = True
        elif min_depth is not None and pdu.depth > min_depth:
            missing_prevs = prevs - seen
            if sent_to_us_directly and missing_prevs:
                # If we're missing stuff, ensure we only fetch stuff one
                # at a time.
                logger.info(
                    "[%s %s] Acquiring room lock to fetch %d missing prev_events: %s",
                    room_id,
                    event_id,
                    len(missing_prevs),
                    shortstr(missing_prevs),
                )
                with (await self._room_pdu_linearizer.queue(pdu.room_id)):
                    logger.info(
                        "[%s %s] Acquired room lock to fetch %d missing prev_events",
                        room_id,
                        event_id,
                        len(missing_prevs),
                    )

                    try:
                        await self._get_missing_events_for_pdu(
                            origin, pdu, prevs, min_depth
                        )
                    except Exception as e:
                        raise Exception(
                            "Error fetching missing prev_events for %s: %s"
                            % (event_id, e)
                        ) from e

                    # Update the set of things we've seen after trying to
                    # fetch the missing stuff
                    seen = await self.store.have_events_in_timeline(prevs)

                    if not prevs - seen:
                        logger.info(
                            "[%s %s] Found all missing prev_events",
                            room_id,
                            event_id,
                        )

        if prevs - seen:
            # We've still not been able to get all of the prev_events for this event.
            #
            # In this case, we need to fall back to asking another server in the
            # federation for the state at this event. That's ok provided we then
            # resolve the state against other bits of the DAG before using it (which
            # will ensure that you can't just take over a room by sending an event,
            # withholding its prev_events, and declaring yourself to be an admin in
            # the subsequent state request).
            #
            # Now, if we're pulling this event as a missing prev_event, then clearly
            # this event is not going to become the only forward-extremity and we are
            # guaranteed to resolve its state against our existing forward
            # extremities, so that should be fine.
            #
            # On the other hand, if this event was pushed to us, it is possible for
            # it to become the only forward-extremity in the room, and we would then
            # trust its state to be the state for the whole room. This is very bad.
            # Further, if the event was pushed to us, there is no excuse for us not to
            # have all the prev_events. We therefore reject any such events.
            #
            # XXX this really feels like it could/should be merged with the above,
            # but there is an interaction with min_depth that I'm not really
            # following.
            if sent_to_us_directly:
                logger.warning(
                    "[%s %s] Rejecting: failed to fetch %d prev events: %s",
                    room_id,
                    event_id,
                    len(prevs - seen),
                    shortstr(prevs - seen),
                )
                raise FederationError(
                    "ERROR",
                    403,
                    (
                        "Your server isn't divulging details about prev_events "
                        "referenced in this event."
                    ),
                    affected=pdu.event_id,
                )

            logger.info(
                "Event %s is missing prev_events: calculating state for a "
                "backwards extremity",
                event_id,
            )

            # Calculate the state after each of the previous events, and
            # resolve them to find the correct state at the current event.
            event_map = {event_id: pdu}
            try:
                # Get the state of the events we know about
                ours = await self.state_store.get_state_groups_ids(room_id, seen)

                # state_maps is a list of mappings from (type, state_key) to event_id
                state_maps = list(ours.values())  # type: List[StateMap[str]]

                # we don't need this any more, let's delete it.
                del ours

                # Ask the remote server for the states we don't
                # know about
                for p in prevs - seen:
                    logger.info(
                        "Requesting state at missing prev_event %s", event_id,
                    )

                    with nested_logging_context(p):
                        # note that if any of the missing prevs share missing state or
                        # auth events, the requests to fetch those events are deduped
                        # by the get_pdu_cache in federation_client.
                        (remote_state, _,) = await self._get_state_for_room(
                            origin, room_id, p, include_event_in_state=True
                        )

                        remote_state_map = {
                            (x.type, x.state_key): x.event_id for x in remote_state
                        }
                        state_maps.append(remote_state_map)

                        for x in remote_state:
                            event_map[x.event_id] = x

                room_version = await self.store.get_room_version_id(room_id)
                state_map = await self._state_resolution_handler.resolve_events_with_store(
                    room_id,
                    room_version,
                    state_maps,
                    event_map,
                    state_res_store=StateResolutionStore(self.store),
                )

                # We need to give _process_received_pdu the actual state events
                # rather than event ids, so generate that now.

                # First though we need to fetch all the events that are in
                # state_map, so we can build up the state below.
                evs = await self.store.get_events(
                    list(state_map.values()),
                    get_prev_content=False,
                    redact_behaviour=EventRedactBehaviour.AS_IS,
                )
                event_map.update(evs)

                state = [event_map[e] for e in state_map.values()]
            except Exception:
                logger.warning(
                    "[%s %s] Error attempting to resolve state at missing "
                    "prev_events",
                    room_id,
                    event_id,
                    exc_info=True,
                )
                raise FederationError(
                    "ERROR",
                    403,
                    "We can't get valid state history.",
                    affected=event_id,
                )

    await self._process_received_pdu(origin, pdu, state=state)
async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth):
    """Try to fetch the prev_events of `pdu` which we don't yet have.

    Asks `origin` for the missing events via `/get_missing_events`, then
    feeds anything returned back through `on_receive_pdu`. This is
    best-effort: fetch failures are logged and swallowed, so callers must
    still cope with prev_events being absent afterwards.

    Args:
        origin (str): Origin of the pdu. Will be called to get the missing events
        pdu: received pdu
        prevs (set(str)): List of event ids which we are missing
        min_depth (int): Minimum depth of events to return.
    """
    room_id = pdu.room_id
    event_id = pdu.event_id
    seen = await self.store.have_events_in_timeline(prevs)
    # Nothing to do if every prev_event is already in our timeline.
    if not prevs - seen:
        return
    latest_list = await self.store.get_latest_event_ids_in_room(room_id)
    # We add the prev events that we have seen to the latest
    # list to ensure the remote server doesn't give them to us
    latest = set(latest_list)
    latest |= seen
    logger.info(
        "[%s %s]: Requesting missing events between %s and %s",
        room_id,
        event_id,
        shortstr(latest),
        event_id,
    )
    # XXX: we set timeout to 10s to help workaround
    # https://github.com/matrix-org/synapse/issues/1733.
    # The reason is to avoid holding the linearizer lock
    # whilst processing inbound /send transactions, causing
    # FDs to stack up and block other inbound transactions
    # which empirically can currently take up to 30 minutes.
    #
    # N.B. this explicitly disables retry attempts.
    #
    # N.B. this also increases our chances of falling back to
    # fetching fresh state for the room if the missing event
    # can't be found, which slightly reduces our security.
    # it may also increase our DAG extremity count for the room,
    # causing additional state resolution? See #1760.
    # However, fetching state doesn't hold the linearizer lock
    # apparently.
    #
    # see https://github.com/matrix-org/synapse/pull/1744
    #
    # ----
    #
    # Update richvdh 2018/09/18: There are a number of problems with timing this
    # request out aggressively on the client side:
    #
    # - it plays badly with the server-side rate-limiter, which starts tarpitting you
    # if you send too many requests at once, so you end up with the server carefully
    # working through the backlog of your requests, which you have already timed
    # out.
    #
    # - for this request in particular, we now (as of
    # https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the
    # server can't produce a plausible-looking set of prev_events - so we become
    # much more likely to reject the event.
    #
    # - contrary to what it says above, we do *not* fall back to fetching fresh state
    # for the room if get_missing_events times out. Rather, we give up processing
    # the PDU whose prevs we are missing, which then makes it much more likely that
    # we'll end up back here for the *next* PDU in the list, which exacerbates the
    # problem.
    #
    # - the aggressive 10s timeout was introduced to deal with incoming federation
    # requests taking 8 hours to process. It's not entirely clear why that was going
    # on; certainly there were other issues causing traffic storms which are now
    # resolved, and I think in any case we may be more sensible about our locking
    # now. We're *certainly* more sensible about our logging.
    #
    # All that said: Let's try increasing the timeout to 60s and see what happens.
    try:
        missing_events = await self.federation_client.get_missing_events(
            origin,
            room_id,
            earliest_events_ids=list(latest),
            latest_events=[pdu],
            limit=10,
            min_depth=min_depth,
            timeout=60000,
        )
    except (RequestSendFailed, HttpResponseException, NotRetryingDestination) as e:
        # We failed to get the missing events, but since we need to handle
        # the case of `get_missing_events` not returning the necessary
        # events anyway, it is safe to simply log the error and continue.
        logger.warning(
            "[%s %s]: Failed to get prev_events: %s", room_id, event_id, e
        )
        return
    logger.info(
        "[%s %s]: Got %d prev_events: %s",
        room_id,
        event_id,
        len(missing_events),
        shortstr(missing_events),
    )
    # We want to sort these by depth so we process them and
    # tell clients about them in order.
    missing_events.sort(key=lambda x: x.depth)
    for ev in missing_events:
        logger.info(
            "[%s %s] Handling received prev_event %s",
            room_id,
            event_id,
            ev.event_id,
        )
        with nested_logging_context(ev.event_id):
            try:
                await self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
            except FederationError as e:
                # A 403 means the event failed the history check; skip it
                # but keep processing the rest of the batch.
                if e.code == 403:
                    logger.warning(
                        "[%s %s] Received prev_event %s failed history check.",
                        room_id,
                        event_id,
                        ev.event_id,
                    )
                else:
                    raise
async def _get_state_for_room(
    self,
    destination: str,
    room_id: str,
    event_id: str,
    include_event_in_state: bool = False,
) -> Tuple[List[EventBase], List[EventBase]]:
    """Requests all of the room state at a given event from a remote homeserver.

    Args:
        destination: The remote homeserver to query for the state.
        room_id: The id of the room we're interested in.
        event_id: The id of the event we want the state at.
        include_event_in_state: if true, the event itself will be included in the
            returned state event list.

    Returns:
        A list of events in the state, possibly including the event itself, and
        a list of events in the auth chain for the given event.
    """
    # First ask the remote for the *ids* of the state and auth events.
    (
        state_event_ids,
        auth_event_ids,
    ) = await self.federation_client.get_room_state_ids(
        destination, room_id, event_id=event_id
    )

    wanted = set(state_event_ids)
    wanted.update(auth_event_ids)
    if include_event_in_state:
        wanted.add(event_id)

    # Resolve the ids to events, fetching from the remote where necessary.
    event_map = await self._get_events_from_store_or_dest(
        destination, room_id, wanted
    )

    failed_to_fetch = wanted - event_map.keys()
    if failed_to_fetch:
        logger.warning(
            "Failed to fetch missing state/auth events for %s %s",
            event_id,
            failed_to_fetch,
        )

    remote_state = [
        event_map[e_id] for e_id in state_event_ids if e_id in event_map
    ]

    if include_event_in_state:
        remote_event = event_map.get(event_id)
        if not remote_event:
            raise Exception("Unable to get missing prev_event %s" % (event_id,))
        # Only state events which weren't rejected belong in the state list.
        if remote_event.is_state() and remote_event.rejected_reason is None:
            remote_state.append(remote_event)

    auth_chain = sorted(
        (event_map[e_id] for e_id in auth_event_ids if e_id in event_map),
        key=lambda e: e.depth,
    )

    return remote_state, auth_chain
async def _get_events_from_store_or_dest(
    self, destination: str, room_id: str, event_ids: Iterable[str]
) -> Dict[str, EventBase]:
    """Look up the given events locally, fetching any we lack from `destination`.

    Newly fetched events are persisted as outliers. Events which cannot be
    fetched, or which turn out to belong to a different room, are logged
    and omitted from the result.

    This function *does not* automatically get missing auth events of the
    newly fetched events. Callers must include the full auth chain of
    of the missing events in the `event_ids` argument, to ensure that any
    missing auth events are correctly fetched.

    Returns:
        map from event_id to event
    """
    fetched_events = await self.store.get_events(event_ids, allow_rejected=True)

    not_found = set(event_ids) - fetched_events.keys()
    if not_found:
        logger.debug(
            "Fetching unknown state/auth events %s for room %s",
            not_found,
            room_id,
        )
        await self._get_events_and_persist(
            destination=destination, room_id=room_id, events=not_found
        )
        # Re-load from the database so that the rejected state is correct.
        fetched_events.update(
            (await self.store.get_events(not_found, allow_rejected=True))
        )

    # A remote server may (bogusly) claim that the state or auth_events at
    # an event in room A are actually events in room B. We may only notice
    # long after the fact, so the best we can do is drop the bad events and
    # carry on, keeping a note in the logs.
    wrong_room = [
        e_id for e_id, ev in fetched_events.items() if ev.room_id != room_id
    ]
    for bad_event_id in wrong_room:
        logger.warning(
            "Remote server %s claims event %s in room %s is an auth/state "
            "event in room %s",
            destination,
            bad_event_id,
            fetched_events[bad_event_id].room_id,
            room_id,
        )
        del fetched_events[bad_event_id]

    return fetched_events
async def _process_received_pdu(
    self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]],
):
    """ Called when we have a new pdu. We need to do auth checks and put it
    through the StateHandler.

    Also, for encrypted events, checks that the claimed sending device is
    one we know about, and marks the sender's device cache as stale (and
    kicks off a background resync) if not.

    Args:
        origin: server sending the event
        event: event to be persisted
        state: Normally None, but if we are handling a gap in the graph
            (ie, we are missing one or more prev_events), the resolved state at the
            event
    """
    room_id = event.room_id
    event_id = event.event_id
    logger.debug("[%s %s] Processing event: %s", room_id, event_id, event)
    try:
        await self._handle_new_event(origin, event, state=state)
    except AuthError as e:
        # Surface auth failures to the sending server as a federation error.
        raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
    # For encrypted messages we check that we know about the sending device,
    # if we don't then we mark the device cache for that user as stale.
    if event.type == EventTypes.Encrypted:
        device_id = event.content.get("device_id")
        sender_key = event.content.get("sender_key")
        cached_devices = await self.store.get_cached_devices_for_user(event.sender)
        resync = False  # Whether we should resync device lists.
        device = None
        if device_id is not None:
            device = cached_devices.get(device_id)
            if device is None:
                logger.info(
                    "Received event from remote device not in our cache: %s %s",
                    event.sender,
                    device_id,
                )
                resync = True
        # We also check if the `sender_key` matches what we expect.
        if sender_key is not None:
            # Figure out what sender key we're expecting. If we know the
            # device and recognize the algorithm then we can work out the
            # exact key to expect. Otherwise check it matches any key we
            # have for that device.
            current_keys = []  # type: Container[str]
            if device:
                keys = device.get("keys", {}).get("keys", {})
                if (
                    event.content.get("algorithm")
                    == RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2
                ):
                    # For this algorithm we expect a curve25519 key.
                    key_name = "curve25519:%s" % (device_id,)
                    current_keys = [keys.get(key_name)]
                else:
                    # We don't understand the algorithm, so we just
                    # check it matches a key for the device.
                    current_keys = keys.values()
            elif device_id:
                # We don't have any keys for the device ID.
                pass
            else:
                # The event didn't include a device ID, so we just look for
                # keys across all devices.
                current_keys = [
                    key
                    for device in cached_devices.values()
                    for key in device.get("keys", {}).get("keys", {}).values()
                ]
            # We now check that the sender key matches (one of) the expected
            # keys.
            if sender_key not in current_keys:
                logger.info(
                    "Received event from remote device with unexpected sender key: %s %s: %s",
                    event.sender,
                    device_id or "<no device_id>",
                    sender_key,
                )
                resync = True
    if resync:
        run_as_background_process(
            "resync_device_due_to_pdu", self._resync_device, event.sender
        )
async def _resync_device(self, sender: str) -> None:
    """Mark the given user's device cache as stale and kick off a resync.

    Best-effort: any failure is logged and swallowed.
    """
    try:
        await self.store.mark_remote_user_device_cache_as_stale(sender)
        # Immediately attempt a resync in the background. On a worker this
        # goes via the replication endpoint; on the main process we can
        # call the device list updater directly.
        if not self.config.worker_app:
            await self._device_list_updater.user_device_resync(sender)
        else:
            await self._user_device_resync(user_id=sender)
    except Exception:
        logger.exception("Failed to resync device for %s", sender)
@log_function
async def backfill(self, dest, room_id, limit, extremities):
    """ Trigger a backfill request to `dest` for the given `room_id`

    This will attempt to get more events from the remote. If the other side
    has no new events to offer, this will return an empty list.

    As the events are received, we check their signatures, and also do some
    sanity-checking on them. If any of the backfilled events are invalid,
    this method throws a SynapseError.

    Args:
        dest (str): server to backfill from; must not be ourselves.
        room_id (str): room to backfill.
        limit (int): maximum number of events to request.
        extremities (list[str]): event ids of the backwards extremities
            to backfill from.

    Returns:
        list: the newly received events, excluding any we already had.

    TODO: make this more useful to distinguish failures of the remote
    server from invalid events (there is probably no point in trying to
    re-fetch invalid events from every other HS in the room.)
    """
    if dest == self.server_name:
        raise SynapseError(400, "Can't backfill from self.")
    events = await self.federation_client.backfill(
        dest, room_id, limit=limit, extremities=extremities
    )
    if not events:
        return []
    # ideally we'd sanity check the events here for excess prev_events etc,
    # but it's hard to reject events at this point without completely
    # breaking backfill in the same way that it is currently broken by
    # events whose signature we cannot verify (#3121).
    #
    # So for now we accept the events anyway. #3124 tracks this.
    #
    # for ev in events:
    #     self._sanity_check_event(ev)
    # Don't bother processing events we already have.
    seen_events = await self.store.have_events_in_timeline(
        {e.event_id for e in events}
    )
    events = [e for e in events if e.event_id not in seen_events]
    if not events:
        return []
    event_map = {e.event_id: e for e in events}
    event_ids = {e.event_id for e in events}
    # build a list of events whose prev_events weren't in the batch.
    # (XXX: this will include events whose prev_events we already have; that doesn't
    # sound right?)
    edges = [ev.event_id for ev in events if set(ev.prev_event_ids()) - event_ids]
    logger.info("backfill: Got %d events with %d edges", len(events), len(edges))
    # For each edge get the current state.
    auth_events = {}
    state_events = {}
    events_to_state = {}
    for e_id in edges:
        state, auth = await self._get_state_for_room(
            destination=dest,
            room_id=room_id,
            event_id=e_id,
            include_event_in_state=False,
        )
        auth_events.update({a.event_id: a for a in auth})
        auth_events.update({s.event_id: s for s in state})
        state_events.update({s.event_id: s for s in state})
        events_to_state[e_id] = state
    # Collect every auth event id referenced by anything we're about to
    # persist, so we can attach the auth events we actually have.
    required_auth = {
        a_id
        for event in events
        + list(state_events.values())
        + list(auth_events.values())
        for a_id in event.auth_event_ids()
    }
    auth_events.update(
        {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map}
    )
    ev_infos = []
    # Step 1: persist the events in the chunk we fetched state for (i.e.
    # the backwards extremities), with custom auth events and state
    for e_id in events_to_state:
        # For paranoia we ensure that these events are marked as
        # non-outliers
        ev = event_map[e_id]
        assert not ev.internal_metadata.is_outlier()
        ev_infos.append(
            _NewEventInfo(
                event=ev,
                state=events_to_state[e_id],
                auth_events={
                    (
                        auth_events[a_id].type,
                        auth_events[a_id].state_key,
                    ): auth_events[a_id]
                    for a_id in ev.auth_event_ids()
                    if a_id in auth_events
                },
            )
        )
    if ev_infos:
        await self._handle_new_events(dest, room_id, ev_infos, backfilled=True)
    # Step 2: Persist the rest of the events in the chunk one by one
    events.sort(key=lambda e: e.depth)
    for event in events:
        if event in events_to_state:
            continue
        # For paranoia we ensure that these events are marked as
        # non-outliers
        assert not event.internal_metadata.is_outlier()
        # We store these one at a time since each event depends on the
        # previous to work out the state.
        # TODO: We can probably do something more clever here.
        await self._handle_new_event(dest, event, backfilled=True)
    return events
async def maybe_backfill(
    self, room_id: str, current_depth: int, limit: int
) -> bool:
    """Checks the database to see if we should backfill before paginating,
    and if so do.

    Args:
        room_id
        current_depth: The depth from which we're paginating from. This is
            used to decide if we should backfill and what extremities to
            use.
        limit: The number of events that the pagination request will
            return. This is used as part of the heuristic to decide if we
            should back paginate.

    Returns:
        True if a backfill attempt against some server succeeded, False
        otherwise.
    """
    extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
    if not extremities:
        logger.debug("Not backfilling as no extremeties found.")
        return False
    # We only want to paginate if we can actually see the events we'll get,
    # as otherwise we'll just spend a lot of resources to get redacted
    # events.
    #
    # We do this by filtering all the backwards extremities and seeing if
    # any remain. Given we don't have the extremity events themselves, we
    # need to actually check the events that reference them.
    #
    # *Note*: the spec wants us to keep backfilling until we reach the start
    # of the room in case we are allowed to see some of the history. However
    # in practice that causes more issues than its worth, as a) its
    # relatively rare for there to be any visible history and b) even when
    # there is its often sufficiently long ago that clients would stop
    # attempting to paginate before backfill reached the visible history.
    #
    # TODO: If we do do a backfill then we should filter the backwards
    #   extremities to only include those that point to visible portions of
    #   history.
    #
    # TODO: Correctly handle the case where we are allowed to see the
    #   forward event but not the backward extremity, e.g. in the case of
    #   initial join of the server where we are allowed to see the join
    #   event but not anything before it. This would require looking at the
    #   state *before* the event, ignoring the special casing certain event
    #   types have.
    forward_events = await self.store.get_successor_events(list(extremities))
    extremities_events = await self.store.get_events(
        forward_events,
        redact_behaviour=EventRedactBehaviour.AS_IS,
        get_prev_content=False,
    )
    # We set `check_history_visibility_only` as we might otherwise get false
    # positives from users having been erased.
    filtered_extremities = await filter_events_for_server(
        self.storage,
        self.server_name,
        list(extremities_events.values()),
        redact=False,
        check_history_visibility_only=True,
    )
    if not filtered_extremities:
        return False
    # Check if we reached a point where we should start backfilling.
    sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
    max_depth = sorted_extremeties_tuple[0][1]
    # If we're approaching an extremity we trigger a backfill, otherwise we
    # no-op.
    #
    # We chose twice the limit here as then clients paginating backwards
    # will send pagination requests that trigger backfill at least twice
    # using the most recent extremity before it gets removed (see below). We
    # chose more than one times the limit in case of failure, but choosing a
    # much larger factor will result in triggering a backfill request much
    # earlier than necessary.
    if current_depth - 2 * limit > max_depth:
        logger.debug(
            "Not backfilling as we don't need to. %d < %d - 2 * %d",
            max_depth,
            current_depth,
            limit,
        )
        return False
    logger.debug(
        "room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s",
        room_id,
        current_depth,
        max_depth,
        sorted_extremeties_tuple,
    )
    # We ignore extremities that have a greater depth than our current depth
    # as:
    #    1. we don't really care about getting events that have happened
    #       before our current position; and
    #    2. we have likely previously tried and failed to backfill from that
    #       extremity, so to avoid getting "stuck" requesting the same
    #       backfill repeatedly we drop those extremities.
    filtered_sorted_extremeties_tuple = [
        t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
    ]
    # However, we need to check that the filtered extremities are non-empty.
    # If they are empty then either we can a) bail or b) still attempt to
    # backill. We opt to try backfilling anyway just in case we do get
    # relevant events.
    if filtered_sorted_extremeties_tuple:
        sorted_extremeties_tuple = filtered_sorted_extremeties_tuple
    # We don't want to specify too many extremities as it causes the backfill
    # request URI to be too long.
    extremities = dict(sorted_extremeties_tuple[:5])
    # Now we need to decide which hosts to hit first.
    # First we try hosts that are already in the room
    # TODO: HEURISTIC ALERT.
    curr_state = await self.state_handler.get_current_state(room_id)
    def get_domains_from_state(state):
        """Get joined domains from state

        Args:
            state (dict[tuple, FrozenEvent]): State map from type/state
                key to event.

        Returns:
            list[tuple[str, int]]: Returns a list of servers with the
                lowest depth of their joins. Sorted by lowest depth first.
        """
        joined_users = [
            (state_key, int(event.depth))
            for (e_type, state_key), event in state.items()
            if e_type == EventTypes.Member and event.membership == Membership.JOIN
        ]
        joined_domains = {}  # type: Dict[str, int]
        for u, d in joined_users:
            try:
                dom = get_domain_from_id(u)
                old_d = joined_domains.get(dom)
                if old_d:
                    joined_domains[dom] = min(d, old_d)
                else:
                    joined_domains[dom] = d
            except Exception:
                # Skip user ids we can't parse a domain out of.
                pass
        return sorted(joined_domains.items(), key=lambda d: d[1])
    curr_domains = get_domains_from_state(curr_state)
    likely_domains = [
        domain for domain, depth in curr_domains if domain != self.server_name
    ]
    async def try_backfill(domains):
        # Try each candidate domain in turn until one succeeds.
        # TODO: Should we try multiple of these at a time?
        for dom in domains:
            try:
                await self.backfill(
                    dom, room_id, limit=100, extremities=extremities
                )
                # If this succeeded then we probably already have the
                # appropriate stuff.
                # TODO: We can probably do something more intelligent here.
                return True
            except SynapseError as e:
                logger.info("Failed to backfill from %s because %s", dom, e)
                continue
            except HttpResponseException as e:
                if 400 <= e.code < 500:
                    raise e.to_synapse_error()
                logger.info("Failed to backfill from %s because %s", dom, e)
                continue
            except CodeMessageException as e:
                if 400 <= e.code < 500:
                    raise
                logger.info("Failed to backfill from %s because %s", dom, e)
                continue
            except NotRetryingDestination as e:
                logger.info(str(e))
                continue
            except RequestSendFailed as e:
                logger.info("Failed to get backfill from %s because %s", dom, e)
                continue
            except FederationDeniedError as e:
                logger.info(e)
                continue
            except Exception as e:
                logger.exception("Failed to backfill from %s because %s", dom, e)
                continue
        return False
    success = await try_backfill(likely_domains)
    if success:
        return True
    # Huh, well *those* domains didn't work out. Lets try some domains
    # from the time.
    tried_domains = set(likely_domains)
    tried_domains.add(self.server_name)
    event_ids = list(extremities.keys())
    logger.debug("calling resolve_state_groups in _maybe_backfill")
    resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
    states = await make_deferred_yieldable(
        defer.gatherResults(
            [resolve(room_id, [e]) for e in event_ids], consumeErrors=True
        )
    )
    # dict[str, dict[tuple, str]], a map from event_id to state map of
    # event_ids.
    states = dict(zip(event_ids, [s.state for s in states]))
    state_map = await self.store.get_events(
        [e_id for ids in states.values() for e_id in ids.values()],
        get_prev_content=False,
    )
    states = {
        key: {
            k: state_map[e_id]
            for k, e_id in state_dict.items()
            if e_id in state_map
        }
        for key, state_dict in states.items()
    }
    for e_id, _ in sorted_extremeties_tuple:
        likely_domains = get_domains_from_state(states[e_id])
        success = await try_backfill(
            [dom for dom, _ in likely_domains if dom not in tried_domains]
        )
        if success:
            return True
        tried_domains.update(dom for dom, _ in likely_domains)
    return False
async def _get_events_and_persist(
    self, destination: str, room_id: str, events: Iterable[str]
):
    """Fetch the given events from a server, and persist them as outliers.

    This function *does not* recursively get missing auth events of the
    newly fetched events. Callers must include in the `events` argument
    any missing events from the auth chain.

    Logs a warning if we can't find the given event.
    """
    room_version = await self.store.get_room_version(room_id)
    # Events we've fetched so far, keyed by event id.
    event_map = {}  # type: Dict[str, EventBase]
    async def get_event(event_id: str):
        # Fetch a single event from the remote, swallowing failures so
        # one bad event doesn't abort the whole batch.
        with nested_logging_context(event_id):
            try:
                event = await self.federation_client.get_pdu(
                    [destination], event_id, room_version, outlier=True,
                )
                if event is None:
                    logger.warning(
                        "Server %s didn't return event %s", destination, event_id,
                    )
                    return
                event_map[event.event_id] = event
            except Exception as e:
                logger.warning(
                    "Error fetching missing state/auth event %s: %s %s",
                    event_id,
                    type(e),
                    e,
                )
    # Fetch with limited concurrency (at most 5 in flight at once).
    await concurrently_execute(get_event, events, 5)
    # Make a map of auth events for each event. We do this after fetching
    # all the events as some of the events' auth events will be in the list
    # of requested events.
    auth_events = [
        aid
        for event in event_map.values()
        for aid in event.auth_event_ids()
        if aid not in event_map
    ]
    persisted_events = await self.store.get_events(
        auth_events, allow_rejected=True,
    )
    event_infos = []
    for event in event_map.values():
        auth = {}
        for auth_event_id in event.auth_event_ids():
            ae = persisted_events.get(auth_event_id) or event_map.get(auth_event_id)
            if ae:
                auth[(ae.type, ae.state_key)] = ae
            else:
                logger.info("Missing auth event %s", auth_event_id)
        event_infos.append(_NewEventInfo(event, None, auth))
    await self._handle_new_events(
        destination, room_id, event_infos,
    )
def _sanity_check_event(self, ev):
"""
Do some early sanity checks of a received event
In particular, checks it doesn't have an excessive number of
prev_events or auth_events, which could cause a huge state resolution
or cascade of event fetches.
Args:
ev (synapse.events.EventBase): event to be checked
Returns: None
Raises:
SynapseError if the event does not pass muster
"""
if len(ev.prev_event_ids()) > 20:
logger.warning(
"Rejecting event %s which has %i prev_events",
ev.event_id,
len(ev.prev_event_ids()),
)
raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many prev_events")
if len(ev.auth_event_ids()) > 10:
logger.warning(
"Rejecting event %s which has %i auth_events",
ev.event_id,
len(ev.auth_event_ids()),
)
raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")
async def send_invite(self, target_host, event):
    """Ask the remote server `target_host` to sign the given invite event.

    Invites must be signed by the invitee's server before distribution,
    so we forward the event and return whatever PDU comes back.
    """
    return await self.federation_client.send_invite(
        destination=target_host,
        room_id=event.room_id,
        event_id=event.event_id,
        pdu=event,
    )
async def on_event_auth(self, event_id: str) -> List[EventBase]:
    """Return the full auth chain for `event_id`, including the event's
    own auth events."""
    event = await self.store.get_event(event_id)
    auth_chain = await self.store.get_auth_chain(
        list(event.auth_event_ids()), include_given=True
    )
    return list(auth_chain)
async def do_invite_join(
    self, target_hosts: Iterable[str], room_id: str, joinee: str, content: JsonDict
) -> Tuple[str, int]:
    """ Attempts to join the `joinee` to the room `room_id` via the
    servers contained in `target_hosts`.

    This first triggers a /make_join/ request that returns a partial
    event that we can fill out and sign. This is then sent to the
    remote server via /send_join/ which responds with the state at that
    event and the auth_chains.

    We suspend processing of any received events from this room until we
    have finished processing the join.

    Args:
        target_hosts: List of servers to attempt to join the room with.
        room_id: The ID of the room to join.
        joinee: The User ID of the joining user.
        content: The event content to use for the join event.

    Returns:
        A (join event id, max stream id) tuple.
    """
    # TODO: We should be able to call this on workers, but the upgrading of
    # room stuff after join currently doesn't work on workers.
    assert self.config.worker.worker_app is None
    logger.debug("Joining %s to %s", joinee, room_id)
    origin, event, room_version_obj = await self._make_and_verify_event(
        target_hosts,
        room_id,
        joinee,
        "join",
        content,
        params={"ver": KNOWN_ROOM_VERSIONS},
    )
    # This shouldn't happen, because the RoomMemberHandler has a
    # linearizer lock which only allows one operation per user per room
    # at a time - so this is just paranoia.
    assert room_id not in self.room_queues
    # Queue up any PDUs for this room that arrive while we're joining;
    # they're replayed in the `finally` block below.
    self.room_queues[room_id] = []
    await self._clean_room_for_join(room_id)
    handled_events = set()
    try:
        # Try the host we successfully got a response to /make_join/
        # request first.
        host_list = list(target_hosts)
        try:
            host_list.remove(origin)
            host_list.insert(0, origin)
        except ValueError:
            pass
        ret = await self.federation_client.send_join(
            host_list, event, room_version_obj
        )
        origin = ret["origin"]
        state = ret["state"]
        auth_chain = ret["auth_chain"]
        auth_chain.sort(key=lambda e: e.depth)
        handled_events.update([s.event_id for s in state])
        handled_events.update([a.event_id for a in auth_chain])
        handled_events.add(event.event_id)
        logger.debug("do_invite_join auth_chain: %s", auth_chain)
        logger.debug("do_invite_join state: %s", state)
        logger.debug("do_invite_join event: %s", event)
        # if this is the first time we've joined this room, it's time to add
        # a row to `rooms` with the correct room version. If there's already a
        # row there, we should override it, since it may have been populated
        # based on an invite request which lied about the room version.
        #
        # federation_client.send_join has already checked that the room
        # version in the received create event is the same as room_version_obj,
        # so we can rely on it now.
        #
        await self.store.upsert_room_on_join(
            room_id=room_id, room_version=room_version_obj,
        )
        max_stream_id = await self._persist_auth_tree(
            origin, room_id, auth_chain, state, event, room_version_obj
        )
        # We wait here until this instance has seen the events come down
        # replication (if we're using replication) as the below uses caches.
        await self._replication.wait_for_stream_position(
            self.config.worker.events_shard_config.get_instance(room_id),
            "events",
            max_stream_id,
        )
        # Check whether this room is the result of an upgrade of a room we already know
        # about. If so, migrate over user information
        predecessor = await self.store.get_room_predecessor(room_id)
        if not predecessor or not isinstance(predecessor.get("room_id"), str):
            return event.event_id, max_stream_id
        old_room_id = predecessor["room_id"]
        logger.debug(
            "Found predecessor for %s during remote join: %s", room_id, old_room_id
        )
        # We retrieve the room member handler here as to not cause a cyclic dependency
        member_handler = self.hs.get_room_member_handler()
        await member_handler.transfer_room_state_on_room_upgrade(
            old_room_id, room_id
        )
        logger.debug("Finished joining %s to %s", joinee, room_id)
        return event.event_id, max_stream_id
    finally:
        room_queue = self.room_queues[room_id]
        del self.room_queues[room_id]
        # we don't need to wait for the queued events to be processed -
        # it's just a best-effort thing at this point. We do want to do
        # them roughly in order, though, otherwise we'll end up making
        # lots of requests for missing prev_events which we do actually
        # have. Hence we fire off the background task, but don't wait for it.
        run_in_background(self._handle_queued_pdus, room_queue)
async def _handle_queued_pdus(self, room_queue):
    """Work through the PDUs which arrived while a /send_join was in flight.

    Each PDU is processed on a best-effort basis: a failure is logged and
    the remaining queue entries are still attempted.

    Args:
        room_queue (list[FrozenEvent, str]): list of PDUs to be processed
            and the servers that sent them
    """
    for pdu, sender in room_queue:
        try:
            logger.info(
                "Processing queued PDU %s which was received "
                "while we were joining %s",
                pdu.event_id,
                pdu.room_id,
            )
            with nested_logging_context(pdu.event_id):
                await self.on_receive_pdu(sender, pdu, sent_to_us_directly=True)
        except Exception as e:
            logger.warning(
                "Error handling queued PDU %s from %s: %s",
                pdu.event_id,
                sender,
                e,
            )
async def on_make_join_request(
    self, origin: str, room_id: str, user_id: str
) -> EventBase:
    """Handle a /make_join/ request by building a partial join event.

    The event is *not* persisted or processed here; the requesting server
    signs it and sends it back to us via /send_join/.

    Args:
        origin: The (verified) server name of the requesting server.
        room_id: Room to create join event in
        user_id: The user to create the join for
    """
    # Only the server the user belongs to may ask us to make a join for them.
    if get_domain_from_id(user_id) != origin:
        logger.info(
            "Got /make_join request for user %r from different origin %s, ignoring",
            user_id,
            origin,
        )
        raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

    # checking the room version will check that we've actually heard of the room
    # (and return a 404 otherwise)
    room_version = await self.store.get_room_version_id(room_id)

    # now check that we are *still* in the room
    if not await self.auth.check_host_in_room(room_id, self.server_name):
        logger.info(
            "Got /make_join request for room %s we are no longer in", room_id,
        )
        raise NotFoundError("Not an active room on this server")

    builder = self.event_builder_factory.new(
        room_version,
        {
            "type": EventTypes.Member,
            "content": {"membership": Membership.JOIN},
            "room_id": room_id,
            "sender": user_id,
            "state_key": user_id,
        },
    )

    try:
        event, context = await self.event_creation_handler.create_new_client_event(
            builder=builder
        )
    except SynapseError as e:
        logger.warning("Failed to create join to %s because %s", room_id, e)
        raise

    # The remote hasn't signed it yet, obviously. We'll do the full checks
    # when we get the event back in `on_send_join_request`
    await self.auth.check_from_context(
        room_version, event, context, do_sig_check=False
    )
    return event
async def on_send_join_request(self, origin, pdu):
    """ We have received a join event for a room. Fully process it and
    respond with the current state and auth chains.

    Args:
        origin: the (verified) server name of the sending server.
        pdu: the signed join event.

    Returns:
        dict with the room's current state events ("state") and the auth
        chain for that state ("auth_chain"), which the joining server
        needs in order to participate in the room.

    Raises:
        SynapseError: if the join event's sender does not belong to
            ``origin``.
    """
    event = pdu

    logger.debug(
        "on_send_join_request from %s: Got event: %s, signatures: %s",
        origin,
        event.event_id,
        event.signatures,
    )

    # A server may only send joins on behalf of its own users.
    if get_domain_from_id(event.sender) != origin:
        logger.info(
            "Got /send_join request for user %r from different origin %s",
            event.sender,
            origin,
        )
        raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

    event.internal_metadata.outlier = False
    # Send this event on behalf of the origin server.
    #
    # The reasons we have the destination server rather than the origin
    # server send it are slightly mysterious: the origin server should have
    # all the necessary state once it gets the response to the send_join,
    # so it could send the event itself if it wanted to. It may be that
    # doing it this way reduces failure modes, or avoids certain attacks
    # where a new server selectively tells a subset of the federation that
    # it has joined.
    #
    # The fact is that, as of the current writing, Synapse doesn't send out
    # the join event over federation after joining, and changing it now
    # would introduce the danger of backwards-compatibility problems.
    event.internal_metadata.send_on_behalf_of = origin

    context = await self._handle_new_event(origin, event)

    logger.debug(
        "on_send_join_request: After _handle_new_event: %s, sigs: %s",
        event.event_id,
        event.signatures,
    )

    prev_state_ids = await context.get_prev_state_ids()

    state_ids = list(prev_state_ids.values())
    auth_chain = await self.store.get_auth_chain(state_ids)

    # Reuse the id list computed above rather than re-materialising
    # prev_state_ids.values() a second time.
    state = await self.store.get_events(state_ids)

    return {"state": list(state.values()), "auth_chain": auth_chain}
async def on_invite_request(
    self, origin: str, event: EventBase, room_version: RoomVersion
):
    """ We've got an invite event. Process and persist it. Sign it.

    Respond with the now signed event.

    Args:
        origin: the (verified) server name of the sending server.
        event: the invite event.
        room_version: the version of the room the invite is for.

    Returns:
        The invite event, now carrying our signature.

    Raises:
        SynapseError: if the invite is malformed, blocked by server
            config / spam checker, or not addressed to a local user.
    """
    if event.state_key is None:
        raise SynapseError(400, "The invite event did not have a state key")

    is_blocked = await self.store.is_room_blocked(event.room_id)
    if is_blocked:
        raise SynapseError(403, "This room has been blocked on this server")

    if self.hs.config.block_non_admin_invites:
        raise SynapseError(403, "This server does not accept room invites")

    # Give the configured spam checker a chance to veto the invite.
    if not self.spam_checker.user_may_invite(
        event.sender, event.state_key, event.room_id
    ):
        raise SynapseError(
            403, "This user is not permitted to send invites to this server/user"
        )

    membership = event.content.get("membership")
    if event.type != EventTypes.Member or membership != Membership.INVITE:
        raise SynapseError(400, "The event was not an m.room.member invite event")

    # The sending server may only forward invites from its own users.
    sender_domain = get_domain_from_id(event.sender)
    if sender_domain != origin:
        raise SynapseError(
            400, "The invite event was not from the server sending it"
        )

    if not self.is_mine_id(event.state_key):
        raise SynapseError(400, "The invite event must be for this server")

    # block any attempts to invite the server notices mxid
    if event.state_key == self._server_notices_mxid:
        raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")

    # keep a record of the room version, if we don't yet know it.
    # (this may get overwritten if we later get a different room version in a
    # join dance).
    await self._maybe_store_room_on_invite(
        room_id=event.room_id, room_version=room_version
    )

    # The invite is an outlier (we have no state for the room) and is
    # membership we learnt about out of band.
    event.internal_metadata.outlier = True
    event.internal_metadata.out_of_band_membership = True

    # Add our own signature so the inviting server can hand the signed
    # event back to the invited user's client.
    event.signatures.update(
        compute_event_signature(
            room_version,
            event.get_pdu_json(),
            self.hs.hostname,
            self.hs.signing_key,
        )
    )

    context = await self.state_handler.compute_event_context(event)
    await self.persist_events_and_notify(event.room_id, [(event, context)])

    return event
async def do_remotely_reject_invite(
    self, target_hosts: Iterable[str], room_id: str, user_id: str, content: JsonDict
) -> Tuple[EventBase, int]:
    """Reject an invite by asking a remote server to build and accept a
    leave event on our behalf.

    Returns:
        The leave event and the stream id it was persisted at.
    """
    origin, event, room_version = await self._make_and_verify_event(
        target_hosts, room_id, user_id, "leave", content=content
    )

    # We have no state for this room (we never joined), so the event is
    # persisted as an out-of-band outlier.
    event.internal_metadata.outlier = True
    event.internal_metadata.out_of_band_membership = True

    # Prefer the host which answered /make_leave/ when sending /send_leave/:
    # move it to the front of the candidate list if it is present.
    hosts = list(target_hosts)
    if origin in hosts:
        hosts.remove(origin)
        hosts.insert(0, origin)

    await self.federation_client.send_leave(hosts, event)

    context = await self.state_handler.compute_event_context(event)
    stream_id = await self.persist_events_and_notify(
        event.room_id, [(event, context)]
    )

    return event, stream_id
async def _make_and_verify_event(
    self,
    target_hosts: Iterable[str],
    room_id: str,
    user_id: str,
    membership: str,
    content: Optional[JsonDict] = None,
    params: Optional[Dict[str, Union[str, Iterable[str]]]] = None,
) -> Tuple[str, EventBase, RoomVersion]:
    """Ask one of `target_hosts` to build a membership event for us and
    sanity-check the result.

    Args:
        target_hosts: candidate servers to ask, tried in order.
        room_id: the room the membership event is for.
        user_id: the user the membership event is for.
        membership: the membership type, e.g. "join" or "leave".
        content: optional content for the membership event; defaults to
            an empty dict.
        params: optional query parameters for the remote request.

    Returns:
        A tuple of (origin that answered, the event, the room version).
    """
    # Avoid a mutable `{}` default argument: materialise the empty dict
    # per-call instead so it can never be shared between invocations.
    if content is None:
        content = {}

    (
        origin,
        event,
        room_version,
    ) = await self.federation_client.make_membership_event(
        target_hosts, room_id, user_id, membership, content, params=params
    )

    logger.debug("Got response to make_%s: %s", membership, event)

    # We should assert some things.
    # FIXME: Do this in a nicer way
    # NOTE(review): these asserts validate remote-supplied data but are
    # stripped under `python -O`; consider explicit checks.
    assert event.type == EventTypes.Member
    assert event.user_id == user_id
    assert event.state_key == user_id
    assert event.room_id == room_id

    return origin, event, room_version
async def on_make_leave_request(
    self, origin: str, room_id: str, user_id: str
) -> EventBase:
    """ We've received a /make_leave/ request, so we create a partial
    leave event for the room and return that. We do *not* persist or
    process it until the other server has signed it and sent it back.

    Args:
        origin: The (verified) server name of the requesting server.
        room_id: Room to create leave event in
        user_id: The user to create the leave for

    Returns:
        The partial (unsigned) leave event.

    Raises:
        SynapseError: if ``user_id`` does not belong to ``origin``.
        AuthError: if the generated leave event fails auth checks.
    """
    # A server may only request leaves on behalf of its own users.
    if get_domain_from_id(user_id) != origin:
        logger.info(
            "Got /make_leave request for user %r from different origin %s, ignoring",
            user_id,
            origin,
        )
        raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

    room_version = await self.store.get_room_version_id(room_id)
    builder = self.event_builder_factory.new(
        room_version,
        {
            "type": EventTypes.Member,
            "content": {"membership": Membership.LEAVE},
            "room_id": room_id,
            "sender": user_id,
            "state_key": user_id,
        },
    )

    event, context = await self.event_creation_handler.create_new_client_event(
        builder=builder
    )

    try:
        # The remote hasn't signed it yet, obviously. We'll do the full checks
        # when we get the event back in `on_send_leave_request`
        await self.auth.check_from_context(
            room_version, event, context, do_sig_check=False
        )
    except AuthError as e:
        logger.warning("Failed to create new leave %r because %s", event, e)
        raise e

    return event
async def on_send_leave_request(self, origin, pdu):
    """ We have received a leave event for a room. Fully process it."""
    event = pdu

    logger.debug(
        "on_send_leave_request: Got event: %s, signatures: %s",
        event.event_id,
        event.signatures,
    )

    # Servers may only send leaves on behalf of their own users.
    sender_domain = get_domain_from_id(event.sender)
    if sender_domain != origin:
        logger.info(
            "Got /send_leave request for user %r from different origin %s",
            event.sender,
            origin,
        )
        raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

    # This is a real, in-DAG event now.
    event.internal_metadata.outlier = False

    await self._handle_new_event(origin, event)

    logger.debug(
        "on_send_leave_request: After _handle_new_event: %s, sigs: %s",
        event.event_id,
        event.signatures,
    )

    return None
async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]:
    """Returns the state at the event. i.e. not including said event.
    """
    event = await self.store.get_event(event_id, check_room_id=room_id)

    state_groups = await self.state_store.get_state_groups(room_id, [event_id])
    if not state_groups:
        return []

    # We asked for a single event, so there is a single entry.
    _, state = state_groups.popitem()
    results = {(e.type, e.state_key): e for e in state}

    if event.is_state():
        # Roll the event's own (type, state_key) entry back to whatever it
        # replaced, since we want the state *before* the event.
        if "replaces_state" in event.unsigned:
            prev_id = event.unsigned["replaces_state"]
            if prev_id != event.event_id:
                prev_event = await self.store.get_event(prev_id)
                results[(event.type, event.state_key)] = prev_event
        else:
            # Nothing was replaced: the event introduced this state entry.
            del results[(event.type, event.state_key)]

    return list(results.values())
async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]:
    """Returns the state at the event. i.e. not including said event.
    """
    event = await self.store.get_event(event_id, check_room_id=room_id)

    state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id])
    if not state_groups:
        return []

    # We asked for a single event, so there is a single entry.
    _, results = state_groups.popitem()

    if event.is_state():
        # Roll the event's own (type, state_key) entry back to whatever it
        # replaced, since we want the state *before* the event.
        if "replaces_state" in event.unsigned:
            prev_id = event.unsigned["replaces_state"]
            if prev_id != event.event_id:
                results[(event.type, event.state_key)] = prev_id
        else:
            # Nothing was replaced: the event introduced this state entry.
            results.pop((event.type, event.state_key), None)

    return list(results.values())
@log_function
async def on_backfill_request(
    self, origin: str, room_id: str, pdu_list: List[str], limit: int
) -> List[EventBase]:
    """Serve a /backfill/ request: return up to `limit` events preceding
    `pdu_list`, filtered for what `origin` is allowed to see."""
    if not await self.auth.check_host_in_room(room_id, origin):
        raise AuthError(403, "Host not in room.")

    # Synapse asks for 100 events per backfill request. Do not allow more.
    capped_limit = min(limit, 100)

    events = await self.store.get_backfill_events(room_id, pdu_list, capped_limit)

    return await filter_events_for_server(self.storage, origin, events)
@log_function
async def get_persisted_pdu(
    self, origin: str, event_id: str
) -> Optional[EventBase]:
    """Get an event from the database for the given server.

    Args:
        origin: hostname of server which is requesting the event; we
            will check that the server is allowed to see it.
        event_id: id of the event being requested

    Returns:
        None if we know nothing about the event; otherwise the
        (possibly-redacted) event.

    Raises:
        AuthError if the server is not currently in the room
    """
    event = await self.store.get_event(
        event_id, allow_none=True, allow_rejected=True
    )
    if event is None:
        return None

    # Only servers currently in the room may see the event.
    if not await self.auth.check_host_in_room(event.room_id, origin):
        raise AuthError(403, "Host not in room.")

    # Apply server-side visibility rules (e.g. erasure/redaction).
    visible = await filter_events_for_server(self.storage, origin, [event])
    return visible[0]
async def get_min_depth_for_context(self, context):
    """Fetch the minimum depth recorded for the given context from the store."""
    min_depth = await self.store.get_min_depth(context)
    return min_depth
async def _handle_new_event(
    self, origin, event, state=None, auth_events=None, backfilled=False
):
    """Auth, persist and (where appropriate) notify about a single event
    received over federation.

    Args:
        origin: the server we received the event from.
        event: the event itself.
        state: optional state at the event, for when we don't have all of
            its prev events.
        auth_events: optional pre-computed (type, state_key) -> event map
            to auth against instead of deriving one.
        backfilled: True if the event came from backfill.

    Returns:
        The EventContext for the (possibly rejected) event.
    """
    context = await self._prep_event(
        origin, event, state=state, auth_events=auth_events, backfilled=backfilled
    )

    try:
        # Push actions are only generated for live events: outliers,
        # backfilled events and rejected events get none.
        if (
            not event.internal_metadata.is_outlier()
            and not backfilled
            and not context.rejected
        ):
            await self.action_generator.handle_push_actions_for_event(
                event, context
            )

        await self.persist_events_and_notify(
            event.room_id, [(event, context)], backfilled=backfilled
        )
    except Exception:
        # Persistence failed: clean up any staged push actions in the
        # background, then re-raise.
        run_in_background(
            self.store.remove_push_actions_from_staging, event.event_id
        )
        raise

    return context
async def _handle_new_events(
    self,
    origin: str,
    room_id: str,
    event_infos: Iterable[_NewEventInfo],
    backfilled: bool = False,
) -> None:
    """Creates the appropriate contexts and persists events. The events
    should not depend on one another, e.g. this should be used to persist
    a bunch of outliers, but not a chunk of individual events that depend
    on each other for state calculations.

    Notifies about the events where appropriate.
    """

    async def prep(ev_info: _NewEventInfo):
        # Prepare (auth + context) one event inside its own logging context.
        event = ev_info.event
        with nested_logging_context(suffix=event.event_id):
            res = await self._prep_event(
                origin,
                event,
                state=ev_info.state,
                auth_events=ev_info.auth_events,
                backfilled=backfilled,
            )
        return res

    # Prep all the events concurrently; this is safe precisely because the
    # events are independent of each other (see docstring).
    contexts = await make_deferred_yieldable(
        defer.gatherResults(
            [run_in_background(prep, ev_info) for ev_info in event_infos],
            consumeErrors=True,
        )
    )

    # gatherResults preserves order, so zip pairs each event with its context.
    await self.persist_events_and_notify(
        room_id,
        [
            (ev_info.event, context)
            for ev_info, context in zip(event_infos, contexts)
        ],
        backfilled=backfilled,
    )
async def _persist_auth_tree(
    self,
    origin: str,
    room_id: str,
    auth_events: List[EventBase],
    state: List[EventBase],
    event: EventBase,
    room_version: RoomVersion,
) -> int:
    """Checks the auth chain is valid (and passes auth checks) for the
    state and event. Then persists the auth chain and state atomically.
    Persists the event separately. Notifies about the persisted events
    where appropriate.

    Will attempt to fetch missing auth events.

    Args:
        origin: Where the events came from
        room_id,
        auth_events
        state
        event
        room_version: The room version we expect this room to have, and
            will raise if it doesn't match the version in the create event.

    Returns:
        The stream id resulting from persisting `event`.
    """
    events_to_context = {}
    # Everything in the auth chain and state is persisted as an outlier:
    # we have no prev_event chain for them.
    for e in itertools.chain(auth_events, state):
        e.internal_metadata.outlier = True
        ctx = await self.state_handler.compute_event_context(e)
        events_to_context[e.event_id] = ctx

    event_map = {
        e.event_id: e for e in itertools.chain(auth_events, state, [event])
    }

    create_event = None
    for e in auth_events:
        if (e.type, e.state_key) == (EventTypes.Create, ""):
            create_event = e
            break

    if create_event is None:
        # If the state doesn't have a create event then the room is
        # invalid, and it would fail auth checks anyway.
        raise SynapseError(400, "No create event in state")

    # Sanity-check that the create event agrees with the room version we
    # were told to expect.
    room_version_id = create_event.content.get(
        "room_version", RoomVersions.V1.identifier
    )

    if room_version.identifier != room_version_id:
        raise SynapseError(400, "Room version mismatch")

    # Collect any auth events referenced by the tree that we weren't given.
    missing_auth_events = set()
    for e in itertools.chain(auth_events, state, [event]):
        for e_id in e.auth_event_ids():
            if e_id not in event_map:
                missing_auth_events.add(e_id)

    # Best-effort fetch of the missing auth events from the origin.
    for e_id in missing_auth_events:
        m_ev = await self.federation_client.get_pdu(
            [origin], e_id, room_version=room_version, outlier=True, timeout=10000,
        )
        if m_ev and m_ev.event_id == e_id:
            event_map[e_id] = m_ev
        else:
            logger.info("Failed to find auth event %r", e_id)

    for e in itertools.chain(auth_events, state, [event]):
        # Build this event's claimed auth map from whatever we have.
        auth_for_e = {
            (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
            for e_id in e.auth_event_ids()
            if e_id in event_map
        }
        if create_event:
            auth_for_e[(EventTypes.Create, "")] = create_event

        try:
            event_auth.check(room_version, e, auth_events=auth_for_e)
        except SynapseError as err:
            # we may get SynapseErrors here as well as AuthErrors. For
            # instance, there are a couple of (ancient) events in some
            # rooms whose senders do not have the correct sigil; these
            # cause SynapseErrors in auth.check. We don't want to give up
            # the attempt to federate altogether in such cases.
            logger.warning("Rejecting %s because %s", e.event_id, err.msg)

            # A failure on the target event itself is fatal; failures on
            # ancillary events just mark them rejected.
            if e == event:
                raise
            events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR

    # Persist the auth chain and state atomically...
    await self.persist_events_and_notify(
        room_id,
        [
            (e, events_to_context[e.event_id])
            for e in itertools.chain(auth_events, state)
        ],
    )

    # ...then the event itself, against the given state.
    new_event_context = await self.state_handler.compute_event_context(
        event, old_state=state
    )

    return await self.persist_events_and_notify(
        room_id, [(event, new_event_context)]
    )
async def _prep_event(
    self,
    origin: str,
    event: EventBase,
    state: Optional[Iterable[EventBase]],
    auth_events: Optional[MutableStateMap[EventBase]],
    backfilled: bool,
) -> EventContext:
    """Compute the event context and run auth / soft-fail checks for an
    event received over federation.

    Args:
        origin: the server we received the event from.
        event: the event to prepare.
        state: optional state at the event, for when we don't have all of
            its prev events.
        auth_events: optional pre-computed (type, state_key) -> event map;
            if absent it is derived from the state before the event.
        backfilled: whether the event came from backfill.

    Returns:
        The event context, with `rejected` set if the event failed auth.
    """
    context = await self.state_handler.compute_event_context(event, old_state=state)

    if not auth_events:
        # Derive the auth events from the state before this event.
        prev_state_ids = await context.get_prev_state_ids()
        auth_events_ids = self.auth.compute_auth_events(
            event, prev_state_ids, for_verification=True
        )
        auth_events_x = await self.store.get_events(auth_events_ids)
        auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}

    # This is a hack to fix some old rooms where the initial join event
    # didn't reference the create event in its auth events.
    if event.type == EventTypes.Member and not event.auth_event_ids():
        if len(event.prev_event_ids()) == 1 and event.depth < 5:
            c = await self.store.get_event(
                event.prev_event_ids()[0], allow_none=True
            )
            if c and c.type == EventTypes.Create:
                auth_events[(c.type, c.state_key)] = c

    context = await self.do_auth(origin, event, context, auth_events=auth_events)

    # Soft-fail checks only apply to events that passed auth.
    if not context.rejected:
        await self._check_for_soft_fail(event, state, backfilled)

    # Changes to guest access may require evicting current guest users.
    if event.type == EventTypes.GuestAccess and not context.rejected:
        await self.maybe_kick_guest_users(event)

    return context
async def _check_for_soft_fail(
    self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool
) -> None:
    """Checks if we should soft fail the event; if so, marks the event as
    such.

    Args:
        event
        state: The state at the event if we don't have all the event's prev events
        backfilled: Whether the event is from backfill
    """
    # For new (non-backfilled and non-outlier) events we check if the event
    # passes auth based on the current state. If it doesn't then we
    # "soft-fail" the event.
    if backfilled or event.internal_metadata.is_outlier():
        return

    extrem_ids_list = await self.store.get_latest_event_ids_in_room(event.room_id)
    extrem_ids = set(extrem_ids_list)
    prev_event_ids = set(event.prev_event_ids())

    if extrem_ids == prev_event_ids:
        # If they're the same then the current state is the same as the
        # state at the event, so no point rechecking auth for soft fail.
        return

    room_version = await self.store.get_room_version_id(event.room_id)
    room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

    # Calculate the "current state".
    if state is not None:
        # If we're explicitly given the state then we won't have all the
        # prev events, and so we have a gap in the graph. In this case
        # we want to be a little careful as we might have been down for
        # a while and have an incorrect view of the current state,
        # however we still want to do checks as gaps are easy to
        # maliciously manufacture.
        #
        # So we use a "current state" that is actually a state
        # resolution across the current forward extremities and the
        # given state at the event. This should correctly handle cases
        # like bans, especially with state res v2.

        state_sets_d = await self.state_store.get_state_groups(
            event.room_id, extrem_ids
        )
        state_sets = list(state_sets_d.values())  # type: List[Iterable[EventBase]]
        state_sets.append(state)
        current_states = await self.state_handler.resolve_events(
            room_version, state_sets, event
        )
        current_state_ids = {
            k: e.event_id for k, e in current_states.items()
        }  # type: StateMap[str]
    else:
        current_state_ids = await self.state_handler.get_current_state_ids(
            event.room_id, latest_event_ids=extrem_ids
        )

    logger.debug(
        "Doing soft-fail check for %s: state %s", event.event_id, current_state_ids,
    )

    # Now check if event pass auth against said current state
    auth_types = auth_types_for_event(event)
    # Only the state entries relevant to this event's auth are needed.
    current_state_ids_list = [
        e for k, e in current_state_ids.items() if k in auth_types
    ]

    auth_events_map = await self.store.get_events(current_state_ids_list)
    current_auth_events = {
        (e.type, e.state_key): e for e in auth_events_map.values()
    }

    try:
        event_auth.check(room_version_obj, event, auth_events=current_auth_events)
    except AuthError as e:
        # Soft-failed events are persisted but hidden from clients and
        # excluded from forward extremities.
        logger.warning("Soft-failing %r because %s", event, e)
        event.internal_metadata.soft_failed = True
async def on_query_auth(
    self, origin, event_id, room_id, remote_auth_chain, rejects, missing
):
    """Handle a /query_auth request: ingest the remote's auth chain and
    return the difference between it and our own for the event."""
    if not await self.auth.check_host_in_room(room_id, origin):
        raise AuthError(403, "Host not in room.")

    event = await self.store.get_event(event_id, check_room_id=room_id)

    # Just go through and process each event in `remote_auth_chain`. We
    # don't want to fall into the trap of `missing` being wrong.
    for remote_event in remote_auth_chain:
        try:
            await self._handle_new_event(origin, remote_event)
        except AuthError:
            # Rejected auth events are expected here; carry on.
            pass

    # Now get the current auth_chain for the event.
    local_auth_chain = await self.store.get_auth_chain(
        list(event.auth_event_ids()), include_given=True
    )

    # TODO: Check if we would now reject event_id. If so we need to tell
    # everyone.
    ret = await self.construct_auth_difference(local_auth_chain, remote_auth_chain)

    logger.debug("on_query_auth returning: %s", ret)

    return ret
async def on_get_missing_events(
    self, origin, room_id, earliest_events, latest_events, limit
):
    """Serve a /get_missing_events request: return events between
    `earliest_events` and `latest_events`, filtered for `origin`."""
    if not await self.auth.check_host_in_room(room_id, origin):
        raise AuthError(403, "Host not in room.")

    # Only allow up to 20 events to be retrieved per request.
    capped_limit = min(limit, 20)

    missing_events = await self.store.get_missing_events(
        room_id=room_id,
        earliest_events=earliest_events,
        latest_events=latest_events,
        limit=capped_limit,
    )

    return await filter_events_for_server(self.storage, origin, missing_events)
async def do_auth(
    self,
    origin: str,
    event: EventBase,
    context: EventContext,
    auth_events: MutableStateMap[EventBase],
) -> EventContext:
    """Run auth checks on an event, first trying to reconcile our auth
    events with the remote's.

    Args:
        origin:
        event:
        context:
        auth_events:
            Map from (event_type, state_key) to event

            Normally, our calculated auth_events based on the state of the room
            at the event's position in the DAG, though occasionally (eg if the
            event is an outlier), may be the auth events claimed by the remote
            server.

            Also NB that this function adds entries to it.

    Returns:
        updated context object (with `rejected` set if auth failed)
    """
    room_version = await self.store.get_room_version_id(event.room_id)
    room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

    try:
        # Reconcile our auth events with the remote's; this may mutate
        # `auth_events` and produce an updated context.
        context = await self._update_auth_events_and_context_for_auth(
            origin, event, context, auth_events
        )
    except Exception:
        # We don't really mind if the above fails, so lets not fail
        # processing if it does. However, it really shouldn't fail so
        # let's still log as an exception since we'll still want to fix
        # any bugs.
        logger.exception(
            "Failed to double check auth events for %s with remote. "
            "Ignoring failure and continuing processing of event.",
            event.event_id,
        )

    try:
        event_auth.check(room_version_obj, event, auth_events=auth_events)
    except AuthError as e:
        # Auth failure is not fatal to processing: the event is kept but
        # marked rejected.
        logger.warning("Failed auth resolution for %r because %s", event, e)
        context.rejected = RejectedReason.AUTH_ERROR

    return context
async def _update_auth_events_and_context_for_auth(
    self,
    origin: str,
    event: EventBase,
    context: EventContext,
    auth_events: MutableStateMap[EventBase],
) -> EventContext:
    """Helper for do_auth. See there for docs.

    Checks whether a given event has the expected auth events. If it
    doesn't then we talk to the remote server to compare state to see if
    we can come to a consensus (e.g. if one server missed some valid
    state).

    This attempts to resolve any potential divergence of state between
    servers, but is not essential and so failures should not block further
    processing of the event.

    Args:
        origin:
        event:
        context:

        auth_events:
            Map from (event_type, state_key) to event

            Normally, our calculated auth_events based on the state of the room
            at the event's position in the DAG, though occasionally (eg if the
            event is an outlier), may be the auth events claimed by the remote
            server.

            Also NB that this function adds entries to it.

    Returns:
        updated context
    """
    event_auth_events = set(event.auth_event_ids())

    # missing_auth is the set of the event's auth_events which we don't yet have
    # in auth_events.
    missing_auth = event_auth_events.difference(
        e.event_id for e in auth_events.values()
    )

    # if we have missing events, we need to fetch those events from somewhere.
    #
    # we start by checking if they are in the store, and then try calling /event_auth/.
    if missing_auth:
        have_events = await self.store.have_seen_events(missing_auth)
        logger.debug("Events %s are in the store", have_events)
        missing_auth.difference_update(have_events)

    if missing_auth:
        # If we don't have all the auth events, we need to get them.
        logger.info("auth_events contains unknown events: %s", missing_auth)
        try:
            try:
                remote_auth_chain = await self.federation_client.get_event_auth(
                    origin, event.room_id, event.event_id
                )
            except RequestSendFailed as e1:
                # The other side isn't around or doesn't implement the
                # endpoint, so lets just bail out.
                logger.info("Failed to get event auth from remote: %s", e1)
                return context

            seen_remotes = await self.store.have_seen_events(
                [e.event_id for e in remote_auth_chain]
            )

            for e in remote_auth_chain:
                # Skip events we already have, and the event itself.
                if e.event_id in seen_remotes:
                    continue

                if e.event_id == event.event_id:
                    continue

                try:
                    # Build the auth map for this remote auth event from
                    # the rest of the remote chain, then persist it as an
                    # outlier.
                    auth_ids = e.auth_event_ids()
                    auth = {
                        (e.type, e.state_key): e
                        for e in remote_auth_chain
                        if e.event_id in auth_ids or e.type == EventTypes.Create
                    }
                    e.internal_metadata.outlier = True

                    logger.debug(
                        "do_auth %s missing_auth: %s", event.event_id, e.event_id
                    )
                    await self._handle_new_event(origin, e, auth_events=auth)

                    if e.event_id in event_auth_events:
                        auth_events[(e.type, e.state_key)] = e
                except AuthError:
                    # A rejected auth event is not a reason to stop.
                    pass

        except Exception:
            logger.exception("Failed to get auth chain")

    if event.internal_metadata.is_outlier():
        # XXX: given that, for an outlier, we'll be working with the
        # event's *claimed* auth events rather than those we calculated:
        # (a) is there any point in this test, since different_auth below will
        # obviously be empty
        # (b) alternatively, why don't we do it earlier?
        logger.info("Skipping auth_event fetch for outlier")
        return context

    # different_auth: auth events the event claims but which are not in
    # our calculated auth map.
    different_auth = event_auth_events.difference(
        e.event_id for e in auth_events.values()
    )

    if not different_auth:
        return context

    logger.info(
        "auth_events refers to events which are not in our calculated auth "
        "chain: %s",
        different_auth,
    )

    # XXX: currently this checks for redactions but I'm not convinced that is
    # necessary?
    different_events = await self.store.get_events_as_list(different_auth)

    for d in different_events:
        if d.room_id != event.room_id:
            logger.warning(
                "Event %s refers to auth_event %s which is in a different room",
                event.event_id,
                d.event_id,
            )

            # don't attempt to resolve the claimed auth events against our own
            # in this case: just use our own auth events.
            #
            # XXX: should we reject the event in this case? It feels like we should,
            # but then shouldn't we also do so if we've failed to fetch any of the
            # auth events?
            return context

    # now we state-resolve between our own idea of the auth events, and the remote's
    # idea of them.

    local_state = auth_events.values()
    remote_auth_events = dict(auth_events)
    remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
    remote_state = remote_auth_events.values()

    room_version = await self.store.get_room_version_id(event.room_id)
    new_state = await self.state_handler.resolve_events(
        room_version, (local_state, remote_state), event
    )

    logger.info(
        "After state res: updating auth_events with new state %s",
        {
            (d.type, d.state_key): d.event_id
            for d in new_state.values()
            if auth_events.get((d.type, d.state_key)) != d
        },
    )

    # NB: mutates the caller's map, as documented above.
    auth_events.update(new_state)

    context = await self._update_context_for_auth_events(
        event, context, auth_events
    )

    return context
async def _update_context_for_auth_events(
    self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
) -> EventContext:
    """Update the state_ids in an event context after auth event resolution,
    storing the changes as a new state group.

    Args:
        event: The event we're handling the context for

        context: initial event context

        auth_events: Events to update in the event context.

    Returns:
        new event context
    """
    # exclude the state key of the new event from the current_state in the context.
    if event.is_state():
        event_key = (event.type, event.state_key)  # type: Optional[Tuple[str, str]]
    else:
        event_key = None
    state_updates = {
        k: a.event_id for k, a in auth_events.items() if k != event_key
    }

    current_state_ids = await context.get_current_state_ids()
    current_state_ids = dict(current_state_ids)  # type: ignore

    current_state_ids.update(state_updates)

    prev_state_ids = await context.get_prev_state_ids()
    prev_state_ids = dict(prev_state_ids)

    # Unlike current_state_ids, prev_state_ids may include the event's own key.
    prev_state_ids.update({k: a.event_id for k, a in auth_events.items()})

    # create a new state group as a delta from the existing one.
    prev_group = context.state_group
    state_group = await self.state_store.store_state_group(
        event.event_id,
        event.room_id,
        prev_group=prev_group,
        delta_ids=state_updates,
        current_state_ids=current_state_ids,
    )

    return EventContext.with_state(
        state_group=state_group,
        state_group_before_event=context.state_group_before_event,
        current_state_ids=current_state_ids,
        prev_state_ids=prev_state_ids,
        prev_group=prev_group,
        delta_ids=state_updates,
    )
async def construct_auth_difference(
    self, local_auth: Iterable[EventBase], remote_auth: Iterable[EventBase]
) -> Dict:
    """ Given a local and remote auth chain, find the differences. This
    assumes that we have already processed all events in remote_auth

    Params:
        local_auth (list)
        remote_auth (list)

    Returns:
        dict with keys "auth_chain" (our auth events), "rejects" (map of
        rejected remote event id -> reason/proof) and "missing" (event ids
        the remote lacks).
    """

    logger.debug("construct_auth_difference Start!")

    # TODO: Make sure we are OK with local_auth or remote_auth having more
    # auth events in them than strictly necessary.

    def sort_fun(ev):
        # Order primarily by depth, breaking ties by event id.
        return ev.depth, ev.event_id

    logger.debug("construct_auth_difference after sort_fun!")

    # We find the differences by starting at the "bottom" of each list
    # and iterating up on both lists. The lists are ordered by depth and
    # then event_id, we iterate up both lists until we find the event ids
    # don't match. Then we look at depth/event_id to see which side is
    # missing that event, and iterate only up that list. Repeat.

    remote_list = list(remote_auth)
    remote_list.sort(key=sort_fun)

    local_list = list(local_auth)
    local_list.sort(key=sort_fun)

    local_iter = iter(local_list)
    remote_iter = iter(remote_list)

    logger.debug("construct_auth_difference before get_next!")

    def get_next(it, opt=None):
        # Like next(it, opt) but swallowing any exception, not just
        # StopIteration.
        try:
            return next(it)
        except Exception:
            return opt

    current_local = get_next(local_iter)
    current_remote = get_next(remote_iter)

    logger.debug("construct_auth_difference before while")

    missing_remotes = []
    missing_locals = []
    while current_local or current_remote:
        # Exhausted one side: everything left on the other is "missing".
        if current_remote is None:
            missing_locals.append(current_local)
            current_local = get_next(local_iter)
            continue

        if current_local is None:
            missing_remotes.append(current_remote)
            current_remote = get_next(remote_iter)
            continue

        if current_local.event_id == current_remote.event_id:
            current_local = get_next(local_iter)
            current_remote = get_next(remote_iter)
            continue

        if current_local.depth < current_remote.depth:
            missing_locals.append(current_local)
            current_local = get_next(local_iter)
            continue

        if current_local.depth > current_remote.depth:
            missing_remotes.append(current_remote)
            current_remote = get_next(remote_iter)
            continue

        # They have the same depth, so we fall back to the event_id order
        if current_local.event_id < current_remote.event_id:
            missing_locals.append(current_local)
            current_local = get_next(local_iter)

        # NOTE(review): if the branch above just exhausted local_iter,
        # current_local is None here and this comparison would raise
        # AttributeError — confirm whether that case is reachable.
        if current_local.event_id > current_remote.event_id:
            missing_remotes.append(current_remote)
            current_remote = get_next(remote_iter)
            continue

    logger.debug("construct_auth_difference after while")

    # missing locals should be sent to the server
    # We should find why we are missing remotes, as they will have been
    # rejected.

    # Remove events from missing_remotes if they are referencing a missing
    # remote. We only care about the "root" rejected ones.
    missing_remote_ids = [e.event_id for e in missing_remotes]
    base_remote_rejected = list(missing_remotes)
    for e in missing_remotes:
        for e_id in e.auth_event_ids():
            if e_id in missing_remote_ids:
                try:
                    base_remote_rejected.remove(e)
                except ValueError:
                    pass

    reason_map = {}

    for e in base_remote_rejected:
        reason = await self.store.get_rejection_reason(e.event_id)
        if reason is None:
            # TODO: e is not in the current state, so we should
            # construct some proof of that.
            continue

        reason_map[e.event_id] = reason

    logger.debug("construct_auth_difference returning")

    return {
        "auth_chain": local_auth,
        "rejects": {
            e.event_id: {"reason": reason_map[e.event_id], "proof": None}
            for e in base_remote_rejected
        },
        "missing": [e.event_id for e in missing_locals],
    }
@log_function
async def exchange_third_party_invite(
    self, sender_user_id, target_user_id, room_id, signed
):
    """Turn a third-party invite into a real m.room.member invite: send
    it ourselves if we're in the room, otherwise forward the exchange to
    a server which is.

    Args:
        sender_user_id: the user exchanging the invite.
        target_user_id: the user being invited.
        room_id: the room the invite is for.
        signed: the `signed` block from the identity server.
    """
    third_party_invite = {"signed": signed}

    event_dict = {
        "type": EventTypes.Member,
        "content": {
            "membership": Membership.INVITE,
            "third_party_invite": third_party_invite,
        },
        "room_id": room_id,
        "sender": sender_user_id,
        "state_key": target_user_id,
    }

    if await self.auth.check_host_in_room(room_id, self.hs.hostname):
        room_version = await self.store.get_room_version_id(room_id)

        builder = self.event_builder_factory.new(room_version, event_dict)

        EventValidator().validate_builder(builder)
        event, context = await self.event_creation_handler.create_new_client_event(
            builder=builder
        )

        event, context = await self.add_display_name_to_third_party_invite(
            room_version, event_dict, event, context
        )

        EventValidator().validate_new(event, self.config)

        # We need to tell the transaction queue to send this out, even
        # though the sender isn't a local user.
        event.internal_metadata.send_on_behalf_of = self.hs.hostname

        try:
            await self.auth.check_from_context(room_version, event, context)
        except AuthError as e:
            logger.warning("Denying new third party invite %r because %s", event, e)
            raise e

        # Verify the identity server's signature on the invite.
        await self._check_signature(event, context)

        # We retrieve the room member handler here as to not cause a cyclic dependency
        member_handler = self.hs.get_room_member_handler()
        await member_handler.send_membership_event(None, event, context)
    else:
        # We're not in the room: ask a server derived from the sender /
        # room id domains to perform the exchange for us.
        destinations = {x.split(":", 1)[-1] for x in (sender_user_id, room_id)}

        await self.federation_client.forward_third_party_invite(
            destinations, room_id, event_dict
        )
async def on_exchange_third_party_invite_request(
    self, room_id: str, event_dict: JsonDict
) -> None:
    """Handle an exchange_third_party_invite request from a remote server

    The remote server will call this when it wants to turn a 3pid invite
    into a normal m.room.member invite.

    Args:
        room_id: The ID of the room.

        event_dict: Dictionary containing the event body.
    """
    room_version = await self.store.get_room_version_id(room_id)

    # NB: event_dict has a particular specced format we might need to fudge
    # if we change event formats too much.
    builder = self.event_builder_factory.new(room_version, event_dict)

    event, context = await self.event_creation_handler.create_new_client_event(
        builder=builder
    )
    event, context = await self.add_display_name_to_third_party_invite(
        room_version, event_dict, event, context
    )

    try:
        await self.auth.check_from_context(room_version, event, context)
    except AuthError as e:
        logger.warning("Denying third party invite %r because %s", event, e)
        raise e
    await self._check_signature(event, context)

    # We need to tell the transaction queue to send this out, even
    # though the sender isn't a local user.
    event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)

    # We retrieve the room member handler here as to not cause a cyclic dependency
    member_handler = self.hs.get_room_member_handler()
    await member_handler.send_membership_event(None, event, context)
async def add_display_name_to_third_party_invite(
    self, room_version, event_dict, event, context
):
    """Copy the display name from the original m.room.third_party_invite
    event into the member event, then rebuild the event.

    The original invite event is looked up in the event's previous state,
    keyed by the signed token. The member event is re-created afterwards
    so the added display name is part of the signed event.

    Args:
        room_version (str): version identifier of the room.
        event_dict (dict): the member event body; mutated in place.
        event (EventBase): the member event built so far.
        context (EventContext): context of the event built so far.

    Returns:
        Tuple[EventBase, EventContext]: the rebuilt event and its context.
    """
    key = (
        EventTypes.ThirdPartyInvite,
        event.content["third_party_invite"]["signed"]["token"],
    )
    original_invite = None
    prev_state_ids = await context.get_prev_state_ids()
    original_invite_id = prev_state_ids.get(key)
    if original_invite_id:
        original_invite = await self.store.get_event(
            original_invite_id, allow_none=True
        )
    if original_invite:
        # If the m.room.third_party_invite event's content is empty, it means the
        # invite has been revoked. In this case, we don't have to raise an error here
        # because the auth check will fail on the invite (because it's not able to
        # fetch public keys from the m.room.third_party_invite event's content, which
        # is empty).
        display_name = original_invite.content.get("display_name")
        event_dict["content"]["third_party_invite"]["display_name"] = display_name
    else:
        logger.info(
            "Could not find invite event for third_party_invite: %r", event_dict
        )
        # We don't discard here as this is not the appropriate place to do
        # auth checks. If we need the invite and don't have it then the
        # auth check code will explode appropriately.

    builder = self.event_builder_factory.new(room_version, event_dict)
    EventValidator().validate_builder(builder)
    event, context = await self.event_creation_handler.create_new_client_event(
        builder=builder
    )
    EventValidator().validate_new(event, self.config)
    return (event, context)
async def _check_signature(self, event, context):
    """
    Checks that the signature in the event is consistent with its invite.

    The check succeeds as soon as any ed25519 signature on the event's
    third_party_invite block verifies against any public key from the
    original m.room.third_party_invite event (and, where the key carries a
    key_validity_url, has not been revoked).

    Args:
        event (Event): The m.room.member event to check
        context (EventContext):

    Raises:
        AuthError: if signature didn't match any keys, or key has been
            revoked,
        SynapseError: if a transient error meant a key couldn't be checked
            for revocation.
    """
    signed = event.content["third_party_invite"]["signed"]
    token = signed["token"]

    prev_state_ids = await context.get_prev_state_ids()
    invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token))

    invite_event = None
    if invite_event_id:
        invite_event = await self.store.get_event(invite_event_id, allow_none=True)

    if not invite_event:
        raise AuthError(403, "Could not find invite")

    logger.debug("Checking auth on event %r", event.content)

    # Remember the last failure so we can re-raise it if no key verifies.
    last_exception = None  # type: Optional[Exception]

    # for each public key in the 3pid invite event
    for public_key_object in self.hs.get_auth().get_public_keys(invite_event):
        try:
            # for each sig on the third_party_invite block of the actual invite
            for server, signature_block in signed["signatures"].items():
                for key_name, encoded_signature in signature_block.items():
                    if not key_name.startswith("ed25519:"):
                        continue

                    logger.debug(
                        "Attempting to verify sig with key %s from %r "
                        "against pubkey %r",
                        key_name,
                        server,
                        public_key_object,
                    )

                    try:
                        public_key = public_key_object["public_key"]
                        verify_key = decode_verify_key_bytes(
                            key_name, decode_base64(public_key)
                        )
                        verify_signed_json(signed, server, verify_key)
                        logger.debug(
                            "Successfully verified sig with key %s from %r "
                            "against pubkey %r",
                            key_name,
                            server,
                            public_key_object,
                        )
                    except Exception:
                        logger.info(
                            "Failed to verify sig with key %s from %r "
                            "against pubkey %r",
                            key_name,
                            server,
                            public_key_object,
                        )
                        raise

                    try:
                        if "key_validity_url" in public_key_object:
                            await self._check_key_revocation(
                                public_key, public_key_object["key_validity_url"]
                            )
                    except Exception:
                        logger.info(
                            "Failed to query key_validity_url %s",
                            public_key_object["key_validity_url"],
                        )
                        raise

                    # Signature verified and key not revoked: we are done.
                    return
        except Exception as e:
            last_exception = e

    if last_exception is None:
        # we can only get here if get_public_keys() returned an empty list
        # TODO: make this better
        raise RuntimeError("no public key in invite event")

    raise last_exception
async def _check_key_revocation(self, public_key, url):
    """
    Checks whether public_key has been revoked.

    Args:
        public_key (str): base-64 encoded public key.
        url (str): Key revocation URL.

    Raises:
        AuthError: if the key has been revoked.
        SynapseError: if a transient error meant a key couldn't be checked
            for revocation.
    """
    try:
        revocation_response = await self.http_client.get_json(
            url, {"public_key": public_key}
        )
    except Exception:
        # Any transport/parse failure is reported as a gateway error.
        raise SynapseError(502, "Third party certificate could not be checked")

    if "valid" not in revocation_response or not revocation_response["valid"]:
        raise AuthError(403, "Third party certificate was invalid")
async def persist_events_and_notify(
    self,
    room_id: str,
    event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
    backfilled: bool = False,
) -> int:
    """Persists events and tells the notifier/pushers about them, if
    necessary.

    Args:
        room_id: The room ID of events being persisted.
        event_and_contexts: Sequence of events with their associated
            context that should be persisted. All events must belong to
            the same room.
        backfilled: Whether these events are a result of
            backfilling or not

    Returns:
        The stream id of the latest persisted event.
    """
    # Events for a room are written by a single designated worker; if that
    # is not us, forward the whole batch over replication.
    instance = self.config.worker.events_shard_config.get_instance(room_id)
    if instance != self._instance_name:
        result = await self._send_events(
            instance_name=instance,
            store=self.store,
            room_id=room_id,
            event_and_contexts=event_and_contexts,
            backfilled=backfilled,
        )
        return result["max_stream_id"]
    else:
        assert self.storage.persistence

        # Note that this returns the events that were persisted, which may not be
        # the same as were passed in if some were deduplicated due to transaction IDs.
        events, max_stream_token = await self.storage.persistence.persist_events(
            event_and_contexts, backfilled=backfilled
        )

        if self._ephemeral_messages_enabled:
            for event in events:
                # If there's an expiry timestamp on the event, schedule its expiry.
                self._message_handler.maybe_schedule_expiry(event)

        if not backfilled:  # Never notify for backfilled events
            for event in events:
                await self._notify_persisted_event(event, max_stream_token)

        return max_stream_token.stream
async def _notify_persisted_event(
    self, event: EventBase, max_stream_token: RoomStreamToken
) -> None:
    """Checks to see if notifier/pushers should be notified about the
    event or not.

    Args:
        event: the newly-persisted event.
        max_stream_token: The max stream token returned by persist_events.
    """
    extra_users = []
    if event.type == EventTypes.Member:
        target_user_id = event.state_key

        # We notify for memberships if its an invite for one of our
        # users
        if event.internal_metadata.is_outlier():
            if event.membership != Membership.INVITE:
                if not self.is_mine_id(target_user_id):
                    # Outlier membership that is neither an invite nor for a
                    # local user: nobody to notify.
                    return

        target_user = UserID.from_string(target_user_id)
        extra_users.append(target_user)
    elif event.internal_metadata.is_outlier():
        # Non-membership outliers never generate notifications.
        return

    # the event has been persisted so it should have a stream ordering.
    assert event.internal_metadata.stream_ordering

    event_pos = PersistedEventPosition(
        self._instance_name, event.internal_metadata.stream_ordering
    )
    self.notifier.on_new_room_event(
        event, event_pos, max_stream_token, extra_users=extra_users
    )
async def _clean_room_for_join(self, room_id: str) -> None:
    """Remove local data for ``room_id`` so the server can (re)join it.

    On a worker the cleanup is proxied to the writer via a replication
    call; otherwise we hit the database directly.

    Args:
        room_id: the room being prepared for join.
    """
    if not self.config.worker_app:
        await self.store.clean_room_for_join(room_id)
    else:
        await self._clean_room_for_join_client(room_id)
async def get_room_complexity(
    self, remote_room_hosts: List[str], room_id: str
) -> Optional[dict]:
    """
    Fetch the complexity of a remote room over federation.

    Args:
        remote_room_hosts: The remote servers to ask, tried in order.
        room_id: The room ID to ask about.

    Returns:
        The first complexity response we receive (a dict of complexity
        metric versions), or None if no host could tell us.
    """
    for candidate_host in remote_room_hosts:
        complexity = await self.federation_client.get_room_complexity(
            candidate_host, room_id
        )
        if complexity:
            # First successful answer wins.
            return complexity

    # Every host came up empty.
    return None
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_4373_3 |
crossvul-python_data_bad_4373_2 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import re
from typing import Optional, Tuple, Type
import synapse
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import (
FEDERATION_UNSTABLE_PREFIX,
FEDERATION_V1_PREFIX,
FEDERATION_V2_PREFIX,
)
from synapse.http.endpoint import parse_and_validate_server_name
from synapse.http.server import JsonResource
from synapse.http.servlet import (
parse_boolean_from_args,
parse_integer_from_args,
parse_json_object_from_request,
parse_string_from_args,
)
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
start_active_span,
start_active_span_from_request,
tags,
whitelisted_homeserver,
)
from synapse.server import HomeServer
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
class TransportLayerServer(JsonResource):
    """Handles incoming federation HTTP requests"""

    def __init__(self, hs, servlet_groups=None):
        """Initialize the TransportLayerServer

        Will by default register all servlets. For custom behaviour, pass in
        a list of servlet_groups to register.

        Args:
            hs (synapse.server.HomeServer): homeserver
            servlet_groups (list[str], optional): List of servlet groups to register.
                Defaults to ``DEFAULT_SERVLET_GROUPS``.
        """
        self.hs = hs
        self.clock = hs.get_clock()

        self.servlet_groups = servlet_groups

        # Set up the underlying JsonResource before any paths are registered.
        super().__init__(hs, canonical_json=False)

        # One authenticator and ratelimiter are shared by every servlet.
        self.authenticator = Authenticator(hs)
        self.ratelimiter = hs.get_federation_ratelimiter()

        self.register_servlets()

    def register_servlets(self):
        # Register all federation servlets (or only the requested groups)
        # against this resource.
        register_servlets(
            self.hs,
            resource=self,
            ratelimiter=self.ratelimiter,
            authenticator=self.authenticator,
            servlet_groups=self.servlet_groups,
        )
class AuthenticationError(SynapseError):
    """There was a problem authenticating the request"""
class NoAuthenticationError(AuthenticationError):
    """The request had no authentication information"""
class Authenticator:
    """Verifies the X-Matrix signatures on incoming federation requests."""

    def __init__(self, hs: HomeServer):
        self._clock = hs.get_clock()
        self.keyring = hs.get_keyring()
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.federation_domain_whitelist = hs.config.federation_domain_whitelist
        self.notifier = hs.get_notifier()

        # Only set on workers: used to tell the master a remote is back up.
        self.replication_client = None
        if hs.config.worker.worker_app:
            self.replication_client = hs.get_tcp_replication()

    # A method just so we can pass 'self' as the authenticator to the Servlets
    async def authenticate_request(self, request, content):
        """Authenticate an incoming federation request.

        Rebuilds the JSON object the origin is expected to have signed
        (method, uri, destination and, if present, the body), extracts the
        X-Matrix signatures from the Authorization headers, and verifies
        them via the keyring.

        Args:
            request (twisted.web.http.Request): the incoming request.
            content: decoded JSON body, or None for bodyless requests.

        Returns:
            str: the authenticated origin server name.

        Raises:
            NoAuthenticationError: if no usable Authorization header was
                supplied.
            FederationDeniedError: if a federation whitelist is configured
                and the origin is not on it.
        """
        now = self._clock.time_msec()
        json_request = {
            "method": request.method.decode("ascii"),
            "uri": request.uri.decode("ascii"),
            "destination": self.server_name,
            "signatures": {},
        }

        if content is not None:
            json_request["content"] = content

        origin = None

        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")

        if not auth_headers:
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED
            )

        # Collect every X-Matrix signature; the last header's origin wins.
        for auth in auth_headers:
            if auth.startswith(b"X-Matrix"):
                (origin, key, sig) = _parse_auth_header(auth)
                json_request["origin"] = origin
                json_request["signatures"].setdefault(origin, {})[key] = sig

        if (
            self.federation_domain_whitelist is not None
            and origin not in self.federation_domain_whitelist
        ):
            raise FederationDeniedError(origin)

        if not json_request["signatures"]:
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED
            )

        await self.keyring.verify_json_for_server(
            origin, json_request, now, "Incoming request"
        )

        logger.debug("Request from %s", origin)
        request.requester = origin

        # If we get a valid signed request from the other side, its probably
        # alive
        retry_timings = await self.store.get_destination_retry_timings(origin)
        if retry_timings and retry_timings["retry_last_ts"]:
            run_in_background(self._reset_retry_timings, origin)

        return origin

    async def _reset_retry_timings(self, origin):
        # Clear the backoff state for this destination: it has just proven
        # it can reach us, so it is presumably back online.
        try:
            logger.info("Marking origin %r as up", origin)
            await self.store.set_destination_retry_timings(origin, None, 0, 0)

            # Inform the relevant places that the remote server is back up.
            self.notifier.notify_remote_server_up(origin)

            if self.replication_client:
                # If we're on a worker we try and inform master about this. The
                # replication client doesn't hook into the notifier to avoid
                # infinite loops where we send a `REMOTE_SERVER_UP` command to
                # master, which then echoes it back to us which in turn pokes
                # the notifier.
                self.replication_client.send_remote_server_up(origin)
        except Exception:
            logger.exception("Error resetting retry timings on %s", origin)
def _parse_auth_header(header_bytes):
    """Parse an X-Matrix auth header

    Args:
        header_bytes (bytes): header value

    Returns:
        Tuple[str, str, str]: origin, key id, signature.

    Raises:
        AuthenticationError if the header could not be parsed
    """
    try:
        header_str = header_bytes.decode("utf-8")
        params = header_str.split(" ")[1].split(",")
        # Split each param on the *first* "=" only: the signature value is
        # base64 and may itself contain "=" padding, which would otherwise
        # produce a 3-tuple here and reject an otherwise-valid header.
        param_dict = dict(kv.split("=", 1) for kv in params)

        def strip_quotes(value):
            if value.startswith('"'):
                return value[1:-1]
            else:
                return value

        origin = strip_quotes(param_dict["origin"])

        # ensure that the origin is a valid server name
        parse_and_validate_server_name(origin)

        key = strip_quotes(param_dict["key"])
        sig = strip_quotes(param_dict["sig"])
        return origin, key, sig
    except Exception as e:
        logger.warning(
            "Error parsing auth header '%s': %s",
            header_bytes.decode("ascii", "replace"),
            e,
        )
        raise AuthenticationError(
            400, "Malformed Authorization header", Codes.UNAUTHORIZED
        )
class BaseFederationServlet:
    """Abstract base class for federation servlet classes.

    The servlet object should have a PATH attribute which takes the form of a regexp to
    match against the request path (excluding the /federation/v1 prefix).

    The servlet should also implement one or more of on_GET, on_POST, on_PUT, to match
    the appropriate HTTP method. These methods must be *asynchronous* and have the
    signature:

        on_<METHOD>(self, origin, content, query, **kwargs)

        With arguments:

            origin (unicode|None): The authenticated server_name of the calling server,
                unless REQUIRE_AUTH is set to False and authentication failed.

            content (unicode|None): decoded json body of the request. None if the
                request was a GET.

            query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
                (ie, '+' and '%xx' are decoded) but note that it is *not* utf8-decoded
                yet.

            **kwargs (dict[unicode, unicode]): the dict mapping keys to path
                components as specified in the path match regexp.

        Returns:
            Optional[Tuple[int, object]]: either (response code, response object) to
                return a JSON response, or None if the request has already been handled.

        Raises:
            SynapseError: to return an error code

            Exception: other exceptions will be caught, logged, and a 500 will be
                returned.
    """

    PATH = ""  # Overridden in subclasses, the regex to match against the path.

    REQUIRE_AUTH = True

    PREFIX = FEDERATION_V1_PREFIX  # Allows specifying the API version

    RATELIMIT = True  # Whether to rate limit requests or not

    def __init__(self, handler, authenticator, ratelimiter, server_name):
        # NOTE(review): server_name is accepted but not stored here;
        # subclasses that need it (e.g. FederationSendServlet) keep their
        # own copy — confirm this is intentional.
        self.handler = handler
        self.authenticator = authenticator
        self.ratelimiter = ratelimiter

    def _wrap(self, func):
        authenticator = self.authenticator
        ratelimiter = self.ratelimiter

        @functools.wraps(func)
        async def new_func(request, *args, **kwargs):
            """A callback which can be passed to HttpServer.RegisterPaths

            Args:
                request (twisted.web.http.Request):
                *args: unused?
                **kwargs (dict[unicode, unicode]): the dict mapping keys to path
                    components as specified in the path match regexp.

            Returns:
                Tuple[int, object]|None: (response code, response object) as returned by
                    the callback method. None if the request has already been handled.
            """
            content = None
            if request.method in [b"PUT", b"POST"]:
                # TODO: Handle other method types? other content types?
                # NOTE(review): the body is read and JSON-parsed *before*
                # the request is authenticated, so an unauthenticated peer
                # can make us do parsing work — confirm a request-size
                # limit is enforced upstream.
                content = parse_json_object_from_request(request)

            try:
                origin = await authenticator.authenticate_request(request, content)
            except NoAuthenticationError:
                origin = None
                if self.REQUIRE_AUTH:
                    logger.warning(
                        "authenticate_request failed: missing authentication"
                    )
                    raise
            except Exception as e:
                logger.warning("authenticate_request failed: %s", e)
                raise

            # Tags for the opentracing span wrapping this request.
            request_tags = {
                "request_id": request.get_request_id(),
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
                tags.HTTP_METHOD: request.get_method(),
                tags.HTTP_URL: request.get_redacted_uri(),
                tags.PEER_HOST_IPV6: request.getClientIP(),
                "authenticated_entity": origin,
                "servlet_name": request.request_metrics.name,
            }

            # Only accept the span context if the origin is authenticated
            # and whitelisted
            if origin and whitelisted_homeserver(origin):
                scope = start_active_span_from_request(
                    request, "incoming-federation-request", tags=request_tags
                )
            else:
                scope = start_active_span(
                    "incoming-federation-request", tags=request_tags
                )

            with scope:
                if origin and self.RATELIMIT:
                    with ratelimiter.ratelimit(origin) as d:
                        # Wait for our ratelimit slot before doing any work.
                        await d
                        if request._disconnected:
                            logger.warning(
                                "client disconnected before we started processing "
                                "request"
                            )
                            return -1, None
                        response = await func(
                            origin, content, request.args, *args, **kwargs
                        )
                else:
                    response = await func(
                        origin, content, request.args, *args, **kwargs
                    )

            return response

        return new_func

    def register(self, server):
        pattern = re.compile("^" + self.PREFIX + self.PATH + "$")

        # Register a wrapped handler for every HTTP method the subclass
        # implements.
        for method in ("GET", "PUT", "POST"):
            code = getattr(self, "on_%s" % (method), None)
            if code is None:
                continue

            server.register_paths(
                method, (pattern,), self._wrap(code), self.__class__.__name__,
            )
class FederationSendServlet(BaseFederationServlet):
    """Accept a federation transaction (a batch of PDUs and EDUs)."""

    PATH = "/send/(?P<transaction_id>[^/]*)/?"

    # We ratelimit manually in the handler as we queue up the requests and we
    # don't want to fill up the ratelimiter with blocked requests.
    RATELIMIT = False

    def __init__(self, handler, server_name, **kwargs):
        super().__init__(handler, server_name=server_name, **kwargs)
        self.server_name = server_name

    # This is when someone is trying to send us a bunch of data.
    async def on_PUT(self, origin, content, query, transaction_id):
        """ Called on PUT /send/<transaction_id>/

        Args:
            request (twisted.web.http.Request): The HTTP request.
            transaction_id (str): The transaction_id associated with this
                request. This is *not* None.

        Returns:
            Tuple of `(code, response)`, where
            `response` is a python dict to be converted into JSON that is
            used as the response body.
        """
        # Parse the request
        try:
            transaction_data = content

            logger.debug("Decoded %s: %s", transaction_id, str(transaction_data))

            logger.info(
                "Received txn %s from %s. (PDUs: %d, EDUs: %d)",
                transaction_id,
                origin,
                len(transaction_data.get("pdus", [])),
                len(transaction_data.get("edus", [])),
            )

            # We should ideally be getting this from the security layer.
            # origin = body["origin"]

            # Add some extra data to the transaction dict that isn't included
            # in the request body.
            transaction_data.update(
                transaction_id=transaction_id, destination=self.server_name
            )

        except Exception as e:
            logger.exception(e)
            return 400, {"error": "Invalid transaction"}

        try:
            code, response = await self.handler.on_incoming_transaction(
                origin, transaction_data
            )
        except Exception:
            logger.exception("on_incoming_transaction failed")
            raise

        return code, response
class FederationEventServlet(BaseFederationServlet):
    """Serve a single PDU, looked up by event ID."""

    PATH = "/event/(?P<event_id>[^/]*)/?"

    async def on_GET(self, origin, content, query, event_id):
        """Fetch the requested event and return it to the caller."""
        response = await self.handler.on_pdu_request(origin, event_id)
        return response
class FederationStateV1Servlet(BaseFederationServlet):
    """Serve the full room state at a given event (v1 API)."""

    PATH = "/state/(?P<context>[^/]*)/?"

    async def on_GET(self, origin, content, query, context):
        """Return the room state; ``context`` is presumably the room ID."""
        event_id = parse_string_from_args(query, "event_id", None, required=False)
        return await self.handler.on_context_state_request(
            origin, context, event_id
        )
class FederationStateIdsServlet(BaseFederationServlet):
    """Serve the room state at a given event as event IDs only."""

    PATH = "/state_ids/(?P<room_id>[^/]*)/?"

    async def on_GET(self, origin, content, query, room_id):
        """Return state/auth-chain IDs; the event_id param is mandatory."""
        event_id = parse_string_from_args(query, "event_id", None, required=True)
        return await self.handler.on_state_ids_request(origin, room_id, event_id)
class FederationBackfillServlet(BaseFederationServlet):
    """Serve a backfill request: events preceding the given event IDs."""

    PATH = "/backfill/(?P<context>[^/]*)/?"

    async def on_GET(self, origin, content, query, context):
        """Return up to ``limit`` events from before the supplied events.

        Responds 400 (rather than an unhandled-KeyError 500) when the
        mandatory ``v`` or ``limit`` query parameters are missing.
        """
        # Previously this indexed query[b"v"] directly, which raised an
        # unhandled KeyError -> HTTP 500 when the parameter was absent.
        if b"v" not in query:
            return 400, {"error": "Did not include v param"}
        versions = [x.decode("ascii") for x in query[b"v"]]
        limit = parse_integer_from_args(query, "limit", None)

        if not limit:
            return 400, {"error": "Did not include limit param"}

        return await self.handler.on_backfill_request(origin, context, versions, limit)
class FederationQueryServlet(BaseFederationServlet):
    """Handle server-server /query requests."""

    PATH = "/query/(?P<query_type>[^/]*)"

    # This is when we receive a server-server Query
    async def on_GET(self, origin, content, query, query_type):
        """Decode the raw query args to str and delegate to the handler."""
        decoded_args = {}
        for raw_key, raw_values in query.items():
            decoded_args[raw_key.decode("utf8")] = raw_values[0].decode("utf-8")
        return await self.handler.on_query_request(query_type, decoded_args)
class FederationMakeJoinServlet(BaseFederationServlet):
    """Build a template m.room.member "join" event for a remote user."""

    PATH = "/make_join/(?P<context>[^/]*)/(?P<user_id>[^/]*)"

    async def on_GET(self, origin, _content, query, context, user_id):
        """
        Args:
            origin (unicode): The authenticated server_name of the calling server

            _content (None): (GETs don't have bodies)

            query (dict[bytes, list[bytes]]): Query params from the request.

            **kwargs (dict[unicode, unicode]): the dict mapping keys to path
                components as specified in the path match regexp.

        Returns:
            Tuple[int, object]: (response code, response object)
        """
        versions = query.get(b"ver")
        if versions is not None:
            supported_versions = [v.decode("utf-8") for v in versions]
        else:
            # Callers that don't advertise supported room versions are
            # assumed to only support version "1".
            supported_versions = ["1"]

        content = await self.handler.on_make_join_request(
            origin, context, user_id, supported_versions=supported_versions
        )
        return 200, content
class FederationMakeLeaveServlet(BaseFederationServlet):
    """Build a template m.room.member "leave" event for a remote user."""

    PATH = "/make_leave/(?P<context>[^/]*)/(?P<user_id>[^/]*)"

    async def on_GET(self, origin, content, query, context, user_id):
        """Ask the handler for a leave-event template and return it."""
        leave_template = await self.handler.on_make_leave_request(
            origin, context, user_id
        )
        return 200, leave_template
class FederationV1SendLeaveServlet(BaseFederationServlet):
    """Accept a signed leave event (v1 API)."""

    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    async def on_PUT(self, origin, content, query, room_id, event_id):
        content = await self.handler.on_send_leave_request(origin, content, room_id)
        # The v1 API returns a nested `(200, content)` tuple due to a
        # historical bug (cf. the v2 servlet, which returns content alone).
        return 200, (200, content)
class FederationV2SendLeaveServlet(BaseFederationServlet):
    """Accept a signed leave event (v2 API: plain response body)."""

    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    PREFIX = FEDERATION_V2_PREFIX

    async def on_PUT(self, origin, content, query, room_id, event_id):
        """Process the leave event supplied in the request body."""
        response = await self.handler.on_send_leave_request(origin, content, room_id)
        return 200, response
class FederationEventAuthServlet(BaseFederationServlet):
    """Return the auth chain for a given event in a given room."""

    PATH = "/event_auth/(?P<context>[^/]*)/(?P<event_id>[^/]*)"

    async def on_GET(self, origin, content, query, context, event_id):
        """Delegate the event-auth lookup to the handler."""
        result = await self.handler.on_event_auth(origin, context, event_id)
        return result
class FederationV1SendJoinServlet(BaseFederationServlet):
    """Accept a signed join event (v1 API)."""

    PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)"

    async def on_PUT(self, origin, content, query, context, event_id):
        # TODO(paul): assert that context/event_id parsed from path actually
        # match those given in content
        content = await self.handler.on_send_join_request(origin, content, context)
        # The v1 API returns a nested `(200, content)` tuple due to a
        # historical bug (cf. the v2 servlet, which returns content alone).
        return 200, (200, content)
class FederationV2SendJoinServlet(BaseFederationServlet):
    """Accept a signed join event (v2 API: plain response body)."""

    PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)"

    PREFIX = FEDERATION_V2_PREFIX

    async def on_PUT(self, origin, content, query, context, event_id):
        """Process the join event supplied in the request body."""
        # TODO(paul): assert that context/event_id parsed from path actually
        # match those given in content
        response = await self.handler.on_send_join_request(origin, content, context)
        return 200, response
class FederationV1InviteServlet(BaseFederationServlet):
    """Accept an invite event over the v1 federation API."""

    PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"

    async def on_PUT(self, origin, content, query, context, event_id):
        # We don't get a room version, so we have to assume its EITHER v1 or
        # v2. This is "fine" as the only difference between V1 and V2 is the
        # state resolution algorithm, and we don't use that for processing
        # invites
        content = await self.handler.on_invite_request(
            origin, content, room_version_id=RoomVersions.V1.identifier
        )

        # V1 federation API is defined to return a content of `[200, {...}]`
        # due to a historical bug.
        return 200, (200, content)
class FederationV2InviteServlet(BaseFederationServlet):
    """Accept an invite event over the v2 federation API."""

    PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"

    PREFIX = FEDERATION_V2_PREFIX

    async def on_PUT(self, origin, content, query, context, event_id):
        # TODO(paul): assert that context/event_id parsed from path actually
        # match those given in content

        # Unlike v1, the v2 body carries the room version and the stripped
        # room state explicitly.
        room_version = content["room_version"]
        event = content["event"]
        invite_room_state = content["invite_room_state"]

        # Synapse expects invite_room_state to be in unsigned, as it is in v1
        # API
        event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state

        content = await self.handler.on_invite_request(
            origin, event, room_version_id=room_version
        )
        return 200, content
class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
    """Exchange a third-party invite for a normal m.room.member invite."""

    PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"

    async def on_PUT(self, origin, content, query, room_id):
        """Hand the event body to the handler and return its response."""
        response = await self.handler.on_exchange_third_party_invite_request(
            room_id, content
        )
        return 200, response
class FederationClientKeysQueryServlet(BaseFederationServlet):
    """Query end-to-end device keys on behalf of a remote server."""

    PATH = "/user/keys/query"

    async def on_POST(self, origin, content, query):
        """Delegate the key query to the handler."""
        result = await self.handler.on_query_client_keys(origin, content)
        return result
class FederationUserDevicesQueryServlet(BaseFederationServlet):
    """Return the device list of a local user to a remote server."""

    PATH = "/user/devices/(?P<user_id>[^/]*)"

    async def on_GET(self, origin, content, query, user_id):
        """Delegate the device-list lookup to the handler."""
        devices = await self.handler.on_query_user_devices(origin, user_id)
        return devices
class FederationClientKeysClaimServlet(BaseFederationServlet):
    """Claim one-time keys on behalf of a remote server."""

    PATH = "/user/keys/claim"

    async def on_POST(self, origin, content, query):
        """Delegate the one-time-key claim to the handler."""
        claimed = await self.handler.on_claim_client_keys(origin, content)
        return 200, claimed
class FederationGetMissingEventsServlet(BaseFederationServlet):
    """Return events on paths between "earliest" and "latest" event sets."""

    # TODO(paul): Why does this path alone end with "/?" optional?
    PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"

    async def on_POST(self, origin, content, query, room_id):
        """Fetch up to ``limit`` missing events for the given room."""
        requested_limit = int(content.get("limit", 10))

        missing = await self.handler.on_get_missing_events(
            origin,
            room_id=room_id,
            earliest_events=content.get("earliest_events", []),
            latest_events=content.get("latest_events", []),
            limit=requested_limit,
        )

        return 200, missing
class On3pidBindServlet(BaseFederationServlet):
    """Receive notification that a third-party identifier has been bound.

    REQUIRE_AUTH is disabled — presumably because the caller is an identity
    server rather than a homeserver and so cannot sign the request (TODO
    confirm).
    """

    PATH = "/3pid/onbind"

    REQUIRE_AUTH = False

    async def on_POST(self, origin, content, query):
        if "invites" in content:
            # Attempt every invite; remember the most recent failure so it
            # can be surfaced after all of them have been tried.
            last_exception = None
            for invite in content["invites"]:
                try:
                    if "signed" not in invite or "token" not in invite["signed"]:
                        message = (
                            "Rejecting received notification of third-"
                            "party invite without signed: %s" % (invite,)
                        )
                        logger.info(message)
                        raise SynapseError(400, message)
                    await self.handler.exchange_third_party_invite(
                        invite["sender"],
                        invite["mxid"],
                        invite["room_id"],
                        invite["signed"],
                    )
                except Exception as e:
                    last_exception = e
            if last_exception:
                raise last_exception
        return 200, {}
class OpenIdUserInfo(BaseFederationServlet):
    """
    Exchange a bearer token for information about a user.

    The response format should be compatible with:
        http://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse

    GET /openid/userinfo?access_token=ABDEFGH HTTP/1.1

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "sub": "@userpart:example.org",
    }
    """

    PATH = "/openid/userinfo"

    # Anyone holding a valid token may call this; no federation signature.
    REQUIRE_AUTH = False

    async def on_GET(self, origin, content, query):
        token = query.get(b"access_token", [None])[0]
        if token is None:
            return (
                401,
                {"errcode": "M_MISSING_TOKEN", "error": "Access Token required"},
            )

        user_id = await self.handler.on_openid_userinfo(token.decode("ascii"))

        if user_id is None:
            return (
                401,
                {
                    "errcode": "M_UNKNOWN_TOKEN",
                    "error": "Access Token unknown or expired",
                },
            )

        return 200, {"sub": user_id}
class PublicRoomList(BaseFederationServlet):
    """
    Fetch the public room list for this server.

    This API returns information in the same format as /publicRooms on the
    client API, but will only ever include local public rooms and hence is
    intended for consumption by other homeservers.

    GET /publicRooms HTTP/1.1

    HTTP/1.1 200 OK
    Content-Type: application/json

    {
        "chunk": [
            {
                "aliases": [
                    "#test:localhost"
                ],
                "guest_can_join": false,
                "name": "test room",
                "num_joined_members": 3,
                "room_id": "!whkydVegtvatLfXmPN:localhost",
                "world_readable": false
            }
        ],
        "end": "END",
        "start": "START"
    }
    """

    PATH = "/publicRooms"

    def __init__(self, handler, authenticator, ratelimiter, server_name, allow_access):
        super().__init__(handler, authenticator, ratelimiter, server_name)
        # Whether remote servers are allowed to read our room directory.
        self.allow_access = allow_access

    async def on_GET(self, origin, content, query):
        """Return a page of the local public room list."""
        if not self.allow_access:
            raise FederationDeniedError(origin)

        limit = parse_integer_from_args(query, "limit", 0)
        since_token = parse_string_from_args(query, "since", None)
        include_all_networks = parse_boolean_from_args(
            query, "include_all_networks", False
        )
        third_party_instance_id = parse_string_from_args(
            query, "third_party_instance_id", None
        )

        if include_all_networks:
            network_tuple = None
        elif third_party_instance_id:
            network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
        else:
            # (None, None) selects the local network only.
            network_tuple = ThirdPartyInstanceID(None, None)

        if limit == 0:
            # zero is a special value which corresponds to no limit.
            limit = None

        data = await self.handler.get_local_public_room_list(
            limit, since_token, network_tuple=network_tuple, from_federation=True
        )
        return 200, data

    async def on_POST(self, origin, content, query):
        """Return a filtered page of the room list (MSC2197)."""
        # This implements MSC2197 (Search Filtering over Federation)
        if not self.allow_access:
            raise FederationDeniedError(origin)

        limit = int(content.get("limit", 100))  # type: Optional[int]
        since_token = content.get("since", None)
        search_filter = content.get("filter", None)

        include_all_networks = content.get("include_all_networks", False)
        third_party_instance_id = content.get("third_party_instance_id", None)

        if include_all_networks:
            network_tuple = None
            if third_party_instance_id is not None:
                raise SynapseError(
                    400, "Can't use include_all_networks with an explicit network"
                )
        elif third_party_instance_id is None:
            network_tuple = ThirdPartyInstanceID(None, None)
        else:
            network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)

        if search_filter is None:
            logger.warning("Nonefilter")

        if limit == 0:
            # zero is a special value which corresponds to no limit.
            limit = None

        data = await self.handler.get_local_public_room_list(
            limit=limit,
            since_token=since_token,
            search_filter=search_filter,
            network_tuple=network_tuple,
            from_federation=True,
        )

        return 200, data
class FederationVersionServlet(BaseFederationServlet):
    """Report this homeserver's implementation name and version."""

    PATH = "/version"

    REQUIRE_AUTH = False

    async def on_GET(self, origin, content, query):
        version_info = {"name": "Synapse", "version": get_version_string(synapse)}
        return 200, {"server": version_info}
class FederationGroupsProfileServlet(BaseFederationServlet):
    """Get or update a group's basic profile on behalf of a remote user."""

    PATH = "/groups/(?P<group_id>[^/]*)/profile"

    async def on_GET(self, origin, content, query, group_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.get_group_profile(group_id, requester)

    async def on_POST(self, origin, content, query, group_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.update_group_profile(
            group_id, requester, content
        )
class FederationGroupsSummaryServlet(BaseFederationServlet):
    """Fetch a group's summary on behalf of a remote user."""

    PATH = "/groups/(?P<group_id>[^/]*)/summary"

    async def on_GET(self, origin, content, query, group_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.get_group_summary(group_id, requester)
class FederationGroupsRoomsServlet(BaseFederationServlet):
    """List the rooms in a group on behalf of a remote user."""

    PATH = "/groups/(?P<group_id>[^/]*)/rooms"

    async def on_GET(self, origin, content, query, group_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.get_rooms_in_group(group_id, requester)
class FederationGroupsAddRoomsServlet(BaseFederationServlet):
    """Add a room to, or remove a room from, a group."""

    PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"

    async def on_POST(self, origin, content, query, group_id, room_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.add_room_to_group(
            group_id, requester, room_id, content
        )

    async def on_DELETE(self, origin, content, query, group_id, room_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.remove_room_from_group(
            group_id, requester, room_id
        )
class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet):
    """Update a single config key for a room within a group."""

    PATH = (
        "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
        "/config/(?P<config_key>[^/]*)"
    )

    async def on_POST(self, origin, content, query, group_id, room_id, config_key):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.update_room_in_group(
            group_id, requester, room_id, config_key, content
        )
class FederationGroupsUsersServlet(BaseFederationServlet):
    """List the users in a group on behalf of a remote user."""

    PATH = "/groups/(?P<group_id>[^/]*)/users"

    async def on_GET(self, origin, content, query, group_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.get_users_in_group(group_id, requester)
class FederationGroupsInvitedUsersServlet(BaseFederationServlet):
    """List the users currently invited to a group."""

    PATH = "/groups/(?P<group_id>[^/]*)/invited_users"

    async def on_GET(self, origin, content, query, group_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.get_invited_users_in_group(
            group_id, requester
        )
class FederationGroupsInviteServlet(BaseFederationServlet):
    """Ask the group server to invite a user to the group."""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"

    async def on_POST(self, origin, content, query, group_id, user_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.invite_to_group(
            group_id, user_id, requester, content
        )
class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
    """Accept an invitation from the group server."""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"

    async def on_POST(self, origin, content, query, group_id, user_id):
        # The accepting user must belong to the requesting server.
        if origin != get_domain_from_id(user_id):
            raise SynapseError(403, "user_id doesn't match origin")

        return 200, await self.handler.accept_invite(group_id, user_id, content)
class FederationGroupsJoinServlet(BaseFederationServlet):
    """Attempt to join a group on behalf of a remote user."""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"

    async def on_POST(self, origin, content, query, group_id, user_id):
        # The joining user must belong to the requesting server.
        if origin != get_domain_from_id(user_id):
            raise SynapseError(403, "user_id doesn't match origin")

        return 200, await self.handler.join_group(group_id, user_id, content)
class FederationGroupsRemoveUserServlet(BaseFederationServlet):
    """Leave, or kick a user from, the group."""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"

    async def on_POST(self, origin, content, query, group_id, user_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.remove_user_from_group(
            group_id, user_id, requester, content
        )
class FederationGroupsLocalInviteServlet(BaseFederationServlet):
    """A group server has invited one of our local users."""

    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"

    async def on_POST(self, origin, content, query, group_id, user_id):
        # The invite must come from the server that owns the group.
        if origin != get_domain_from_id(group_id):
            raise SynapseError(403, "group_id doesn't match origin")

        return 200, await self.handler.on_invite(group_id, user_id, content)
class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet):
    """A group server has removed a local user from a group.

    The request is only trusted when it comes from the server that owns the
    group, so the group_id's domain is checked against the origin.
    """

    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"

    async def on_POST(self, origin, content, query, group_id, user_id):
        if get_domain_from_id(group_id) != origin:
            # The check is on group_id; the previous message incorrectly
            # blamed user_id (cf. FederationGroupsLocalInviteServlet).
            raise SynapseError(403, "group_id doesn't match origin")

        new_content = await self.handler.user_removed_from_group(
            group_id, user_id, content
        )

        return 200, new_content
class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
    """A group or user's server renews their attestation.

    (The "Attestaion" typo in the class name is kept: it is part of the
    public identifier referenced elsewhere.)
    """

    PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"

    async def on_POST(self, origin, content, query, group_id, user_id):
        # We don't need to check auth here as we check the attestation signatures
        return 200, await self.handler.on_renew_attestation(
            group_id, user_id, content
        )
class FederationGroupsSummaryRoomsServlet(BaseFederationServlet):
    """Add or remove a room in the group summary, optionally under a category.

    Matches both:
    - /groups/:group/summary/rooms/:room_id
    - /groups/:group/summary/categories/:category/rooms/:room_id
    """

    PATH = (
        "/groups/(?P<group_id>[^/]*)/summary"
        "(/categories/(?P<category_id>[^/]+))?"
        "/rooms/(?P<room_id>[^/]*)"
    )

    async def on_POST(self, origin, content, query, group_id, category_id, room_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")

        return 200, await self.handler.update_group_summary_room(
            group_id,
            requester,
            room_id=room_id,
            category_id=category_id,
            content=content,
        )

    async def on_DELETE(self, origin, content, query, group_id, category_id, room_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")

        return 200, await self.handler.delete_group_summary_room(
            group_id, requester, room_id=room_id, category_id=category_id
        )
class FederationGroupsCategoriesServlet(BaseFederationServlet):
    """Fetch all categories defined for a group."""

    PATH = "/groups/(?P<group_id>[^/]*)/categories/?"

    async def on_GET(self, origin, content, query, group_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.get_group_categories(group_id, requester)
class FederationGroupsCategoryServlet(BaseFederationServlet):
    """Get, upsert or delete a single category in a group."""

    PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"

    async def on_GET(self, origin, content, query, group_id, category_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.get_group_category(
            group_id, requester, category_id
        )

    async def on_POST(self, origin, content, query, group_id, category_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")

        return 200, await self.handler.upsert_group_category(
            group_id, requester, category_id, content
        )

    async def on_DELETE(self, origin, content, query, group_id, category_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")

        return 200, await self.handler.delete_group_category(
            group_id, requester, category_id
        )
class FederationGroupsRolesServlet(BaseFederationServlet):
    """Fetch all roles defined for a group."""

    PATH = "/groups/(?P<group_id>[^/]*)/roles/?"

    async def on_GET(self, origin, content, query, group_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.get_group_roles(group_id, requester)
class FederationGroupsRoleServlet(BaseFederationServlet):
    """Get, upsert or delete a single role in a group."""

    PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"

    async def on_GET(self, origin, content, query, group_id, role_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.get_group_role(group_id, requester, role_id)

    async def on_POST(self, origin, content, query, group_id, role_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")

        return 200, await self.handler.update_group_role(
            group_id, requester, role_id, content
        )

    async def on_DELETE(self, origin, content, query, group_id, role_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")

        return 200, await self.handler.delete_group_role(
            group_id, requester, role_id
        )
class FederationGroupsSummaryUsersServlet(BaseFederationServlet):
    """Add or remove a user in the group summary, optionally under a role.

    Matches both:
    - /groups/:group/summary/users/:user_id
    - /groups/:group/summary/roles/:role/users/:user_id
    """

    PATH = (
        "/groups/(?P<group_id>[^/]*)/summary"
        "(/roles/(?P<role_id>[^/]+))?"
        "/users/(?P<user_id>[^/]*)"
    )

    async def on_POST(self, origin, content, query, group_id, role_id, user_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")

        return 200, await self.handler.update_group_summary_user(
            group_id,
            requester,
            user_id=user_id,
            role_id=role_id,
            content=content,
        )

    async def on_DELETE(self, origin, content, query, group_id, role_id, user_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")

        return 200, await self.handler.delete_group_summary_user(
            group_id, requester, user_id=user_id, role_id=role_id
        )
class FederationGroupsBulkPublicisedServlet(BaseFederationServlet):
    """Look up which groups a batch of users have publicised.

    (The previous docstring, "Get roles in a group", appears to have been
    copy-pasted from FederationGroupsRolesServlet; the handler call is
    bulk_get_publicised_groups.)
    """
    PATH = "/get_groups_publicised"
    async def on_POST(self, origin, content, query):
        # proxy=False: answer from local data only (assumption — confirm
        # against the handler's implementation).
        resp = await self.handler.bulk_get_publicised_groups(
            content["user_ids"], proxy=False
        )
        return 200, resp
class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
    """Set whether a group is joinable without an invite or knock."""

    PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"

    async def on_PUT(self, origin, content, query, group_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        return 200, await self.handler.set_group_join_policy(
            group_id, requester, content
        )
class RoomComplexityServlet(BaseFederationServlet):
    """
    Indicates to other servers how complex (and therefore likely
    resource-intensive) a public room this server knows about is.
    """

    PATH = "/rooms/(?P<room_id>[^/]*)/complexity"

    PREFIX = FEDERATION_UNSTABLE_PREFIX

    async def on_GET(self, origin, content, query, room_id):
        store = self.handler.hs.get_datastore()

        # Only expose complexity for publicly visible rooms.
        if not await store.is_room_world_readable_or_publicly_joinable(room_id):
            raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)

        return 200, await store.get_room_complexity(room_id)
# Core federation traffic servlets.
# (FederationEventServlet previously appeared twice in this tuple; the
# duplicate has been removed — registering the same servlet twice is
# redundant.)
FEDERATION_SERVLET_CLASSES = (
    FederationSendServlet,
    FederationEventServlet,
    FederationStateV1Servlet,
    FederationStateIdsServlet,
    FederationBackfillServlet,
    FederationQueryServlet,
    FederationMakeJoinServlet,
    FederationMakeLeaveServlet,
    FederationV1SendJoinServlet,
    FederationV2SendJoinServlet,
    FederationV1SendLeaveServlet,
    FederationV2SendLeaveServlet,
    FederationV1InviteServlet,
    FederationV2InviteServlet,
    FederationGetMissingEventsServlet,
    FederationEventAuthServlet,
    FederationClientKeysQueryServlet,
    FederationUserDevicesQueryServlet,
    FederationClientKeysClaimServlet,
    FederationThirdPartyInviteExchangeServlet,
    On3pidBindServlet,
    FederationVersionServlet,
    RoomComplexityServlet,
)  # type: Tuple[Type[BaseFederationServlet], ...]

OPENID_SERVLET_CLASSES = (
    OpenIdUserInfo,
)  # type: Tuple[Type[BaseFederationServlet], ...]

ROOM_LIST_CLASSES = (PublicRoomList,)  # type: Tuple[Type[PublicRoomList], ...]

# Servlets backed by the group server handler.
GROUP_SERVER_SERVLET_CLASSES = (
    FederationGroupsProfileServlet,
    FederationGroupsSummaryServlet,
    FederationGroupsRoomsServlet,
    FederationGroupsUsersServlet,
    FederationGroupsInvitedUsersServlet,
    FederationGroupsInviteServlet,
    FederationGroupsAcceptInviteServlet,
    FederationGroupsJoinServlet,
    FederationGroupsRemoveUserServlet,
    FederationGroupsSummaryRoomsServlet,
    FederationGroupsCategoriesServlet,
    FederationGroupsCategoryServlet,
    FederationGroupsRolesServlet,
    FederationGroupsRoleServlet,
    FederationGroupsSummaryUsersServlet,
    FederationGroupsAddRoomsServlet,
    FederationGroupsAddRoomsConfigServlet,
    FederationGroupsSettingJoinPolicyServlet,
)  # type: Tuple[Type[BaseFederationServlet], ...]

# Servlets backed by the local-group handler.
GROUP_LOCAL_SERVLET_CLASSES = (
    FederationGroupsLocalInviteServlet,
    FederationGroupsRemoveLocalUserServlet,
    FederationGroupsBulkPublicisedServlet,
)  # type: Tuple[Type[BaseFederationServlet], ...]

GROUP_ATTESTATION_SERVLET_CLASSES = (
    FederationGroupsRenewAttestaionServlet,
)  # type: Tuple[Type[BaseFederationServlet], ...]

DEFAULT_SERVLET_GROUPS = (
    "federation",
    "room_list",
    "group_server",
    "group_local",
    "group_attestation",
    "openid",
)
def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=None):
    """Initialize and register servlet classes.

    Will by default register all servlets. For custom behaviour, pass in
    a list of servlet_groups to register.

    Args:
        hs (synapse.server.HomeServer): homeserver
        resource (TransportLayerServer): resource class to register to
        authenticator (Authenticator): authenticator to use
        ratelimiter (util.ratelimitutils.FederationRateLimiter): ratelimiter to use
        servlet_groups (list[str], optional): List of servlet groups to register.
            Defaults to ``DEFAULT_SERVLET_GROUPS``.
    """
    if not servlet_groups:
        servlet_groups = DEFAULT_SERVLET_GROUPS

    # (group name, servlet classes, handler getter, extra constructor kwargs).
    # Collapses six near-identical registration loops into one table.
    registrations = (
        ("federation", FEDERATION_SERVLET_CLASSES, hs.get_federation_server, {}),
        ("openid", OPENID_SERVLET_CLASSES, hs.get_federation_server, {}),
        (
            "room_list",
            ROOM_LIST_CLASSES,
            hs.get_room_list_handler,
            {"allow_access": hs.config.allow_public_rooms_over_federation},
        ),
        (
            "group_server",
            GROUP_SERVER_SERVLET_CLASSES,
            hs.get_groups_server_handler,
            {},
        ),
        ("group_local", GROUP_LOCAL_SERVLET_CLASSES, hs.get_groups_local_handler, {}),
        (
            "group_attestation",
            GROUP_ATTESTATION_SERVLET_CLASSES,
            hs.get_groups_attestation_renewer,
            {},
        ),
    )

    for group_name, servlet_classes, get_handler, extra_kwargs in registrations:
        if group_name not in servlet_groups:
            continue
        for servletclass in servlet_classes:
            # get_handler() is called per servlet, matching the original
            # call pattern.
            servletclass(
                handler=get_handler(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
                **extra_kwargs,
            ).register(resource)
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_4373_2 |
crossvul-python_data_good_1890_0 | """Small, fast HTTP client library for Python.
Features persistent connections, cache, and Google App Engine Standard
Environment support.
"""
from __future__ import print_function
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = [
"Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger",
"Alex Yu",
]
__license__ = "MIT"
__version__ = "0.18.1"
import base64
import calendar
import copy
import email
import email.FeedParser
import email.Message
import email.Utils
import errno
import gzip
import httplib
import os
import random
import re
import StringIO
import sys
import time
import urllib
import urlparse
import zlib
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except (ImportError, AttributeError):
socks = None
from httplib2 import auth
from httplib2.error import *
# Build the appropriate socket wrapper for ssl
ssl = None
ssl_SSLError = None
ssl_CertificateError = None
try:
    import ssl  # python 2.6
except ImportError:
    pass

if ssl is not None:
    # Resolve via getattr: very old ssl modules may lack these attributes.
    ssl_SSLError = getattr(ssl, "SSLError", None)
    ssl_CertificateError = getattr(ssl, "CertificateError", None)
def _ssl_wrap_socket(sock, key_file, cert_file, disable_validation, ca_certs, ssl_version, hostname, key_password):
    """Wrap *sock* in TLS.

    When ssl.SSLContext is available (Python 2.7.9+) an SSLContext is used,
    which enables SNI and hostname checking; otherwise falls back to
    ssl.wrap_socket, which cannot handle password-protected keys.
    """
    if disable_validation:
        cert_reqs = ssl.CERT_NONE
    else:
        cert_reqs = ssl.CERT_REQUIRED
    if ssl_version is None:
        # SSLv23 here means "negotiate the best protocol both sides support".
        ssl_version = ssl.PROTOCOL_SSLv23

    if hasattr(ssl, "SSLContext"):  # Python 2.7.9
        context = ssl.SSLContext(ssl_version)
        context.verify_mode = cert_reqs
        # Hostname checking is only meaningful when certificates are verified.
        context.check_hostname = cert_reqs != ssl.CERT_NONE
        if cert_file:
            if key_password:
                context.load_cert_chain(cert_file, key_file, key_password)
            else:
                context.load_cert_chain(cert_file, key_file)
        if ca_certs:
            context.load_verify_locations(ca_certs)
        # server_hostname enables SNI and drives hostname verification.
        return context.wrap_socket(sock, server_hostname=hostname)
    else:
        if key_password:
            raise NotSupportedOnThisPlatform("Certificate with password is not supported.")
        return ssl.wrap_socket(
            sock, keyfile=key_file, certfile=cert_file, cert_reqs=cert_reqs, ca_certs=ca_certs, ssl_version=ssl_version,
        )
def _ssl_wrap_socket_unsupported(
    sock, key_file, cert_file, disable_validation, ca_certs, ssl_version, hostname, key_password
):
    """Fallback TLS wrapper for platforms without the ssl module.

    Certificate validation and password-protected keys are impossible here,
    so both are rejected explicitly.
    """
    if not disable_validation:
        raise CertificateValidationUnsupported(
            "SSL certificate validation is not supported without "
            "the ssl module installed. To avoid this error, install "
            "the ssl module, or explicity disable validation."
        )
    if key_password:
        raise NotSupportedOnThisPlatform("Certificate with password is not supported.")
    return httplib.FakeSocket(sock, socket.ssl(sock, key_file, cert_file))
if ssl is None:
    # Without the ssl module we can only refuse validation or fail loudly.
    _ssl_wrap_socket = _ssl_wrap_socket_unsupported

if sys.version_info >= (2, 3):
    from .iri2uri import iri2uri
else:
    # Pre-2.3 fallback: pass IRIs through unchanged.
    def iri2uri(uri):
        return uri
def has_timeout(timeout):  # python 2.6
    """Return True when *timeout* is an actual timeout value.

    Both None and socket's "use the global default" sentinel mean
    "no explicit timeout".
    """
    sentinel = getattr(socket, "_GLOBAL_DEFAULT_TIMEOUT", None)
    if sentinel is not None:
        return timeout is not None and timeout is not sentinel
    return timeout is not None
# Public API of the module.
__all__ = [
    "Http",
    "Response",
    "ProxyInfo",
    "HttpLib2Error",
    "RedirectMissingLocation",
    "RedirectLimit",
    "FailedToDecompressContent",
    "UnimplementedDigestAuthOptionError",
    "UnimplementedHmacDigestAuthOptionError",
    "debuglevel",
    "ProxiesUnavailableError",
]

# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0

# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
# Python 2.3 support
if sys.version_info < (2, 4):
    # NOTE: deliberately shadows the sorted() builtin (absent on 2.3); sorts
    # in place and returns the sequence so call sites can use the 2.4+ idiom.
    def sorted(seq):
        seq.sort()
        return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        # Mirrors the stdlib behaviour: headers are unavailable before
        # the response has begun.
        raise httplib.ResponseNotReady()
    return self.msg.items()

# Backport getheaders() onto pre-2.4 HTTPResponse objects.
if not hasattr(httplib.HTTPResponse, "getheaders"):
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
# NOTE(review): these definitions shadow the names pulled in by
# `from httplib2.error import *` above — confirm which set is canonical.
class HttpLib2Error(Exception):
    """Base class for all errors raised by this module."""
    pass

# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    """Error carrying the (response, content) pair that triggered it."""
    def __init__(self, desc, response, content):
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

class RedirectMissingLocation(HttpLib2ErrorWithResponse):
    """A 3xx response did not include a Location header."""
    pass

class RedirectLimit(HttpLib2ErrorWithResponse):
    """Too many redirects were followed."""
    pass

class FailedToDecompressContent(HttpLib2ErrorWithResponse):
    """The body claimed a content-encoding but could not be decoded."""
    pass

class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """The server requested an unsupported Digest auth option."""
    pass

class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """The server requested an unsupported HMACDigest auth option."""
    pass

class MalformedHeader(HttpLib2Error):
    """A response header could not be parsed."""
    pass

class RelativeURIError(HttpLib2Error):
    """A relative URI was given where an absolute one is required."""
    pass

class ServerNotFoundError(HttpLib2Error):
    """The server's hostname could not be resolved."""
    pass

class ProxiesUnavailableError(HttpLib2Error):
    """Proxy support was requested but is not available."""
    pass

class CertificateValidationUnsupported(HttpLib2Error):
    """Certificate validation requested on a platform that cannot do it."""
    pass

class SSLHandshakeError(HttpLib2Error):
    """The TLS handshake failed."""
    pass

class NotSupportedOnThisPlatform(HttpLib2Error):
    """A requested feature is unavailable on this platform."""
    pass

class CertificateHostnameMismatch(SSLHandshakeError):
    """The server certificate does not match the requested hostname."""
    def __init__(self, desc, host, cert):
        HttpLib2Error.__init__(self, desc)
        self.host = host
        self.cert = cert

class NotRunningAppEngineEnvironment(HttpLib2Error):
    """App Engine support invoked outside App Engine (inferred from the
    name — confirm at call sites)."""
    pass
# Open Items:
# -----------

# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)

# Pluggable cache storage (supports storing the cache in
#   flat files by default. We need a plug-in architecture
#   that can support Berkeley DB and Squid)

# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.

# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5

from httplib2 import certs

# Default CA bundle used when no ca_certs path is supplied.
CA_CERTS = certs.where()

# Which headers are hop-by-hop headers by default
HOP_BY_HOP = [
    "connection",
    "keep-alive",
    "proxy-authenticate",
    "proxy-authorization",
    "te",
    "trailers",
    "transfer-encoding",
    "upgrade",
]

# https://tools.ietf.org/html/rfc7231#section-8.1.3
SAFE_METHODS = ("GET", "HEAD")  # TODO add "OPTIONS", "TRACE"

# To change, assign to `Http().redirect_codes`
REDIRECT_CODES = frozenset((300, 301, 302, 303, 307, 308))
def _get_end2end_headers(response):
    """Return the names of the end-to-end (non hop-by-hop) headers in *response*.

    The default hop-by-hop set is extended with anything named in the
    response's own Connection header.
    """
    excluded = list(HOP_BY_HOP)
    excluded.extend(token.strip() for token in response.get("connection", "").split(","))
    return [name for name in response.keys() if name not in excluded]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")

def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

    (scheme, authority, path, query, fragment) = parse_uri(uri)

    Missing components come back as None.
    """
    match = URI.match(uri)
    return (match.group(2), match.group(4), match.group(5), match.group(7), match.group(9))
def urlnorm(uri):
    """Normalize an absolute URI for use as a cache key.

    Returns (scheme, authority, request_uri, defrag_uri) where defrag_uri is
    the URI without its fragment. Raises RelativeURIError for relative URIs.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    # Scheme and authority are case-insensitive (RFC 3986). The original
    # code lowered the scheme twice; once is enough.
    scheme = scheme.lower()
    authority = authority.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r"^\w+://")
re_unsafe = re.compile(r"[^\w\-_.()=!]+")

def safename(filename):
    """Return a filename suitable for the cache.

    Strips dangerous and common characters to create a filename we
    can use to store the cache in.
    """
    # Python 2 semantics: a byte string (str) is decoded from UTF-8 for the
    # readable prefix, while the md5 suffix is computed over the raw bytes,
    # so equal URIs always map to the same cache file.
    if isinstance(filename, str):
        filename_bytes = filename
        filename = filename.decode("utf-8")
    else:
        filename_bytes = filename.encode("utf-8")
    filemd5 = _md5(filename_bytes).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_unsafe.sub("", filename)

    # limit length of filename (vital for Windows)
    # https://github.com/httplib2/httplib2/pull/74
    # C:\Users\ <username> \AppData\Local\Temp\ <safe_filename> , <md5>
    # 9 chars + max 104 chars + 20 chars + x + 1 + 32 = max 259 chars
    # Thus max safe filename x = 93 chars. Let it be 90 to make a round sum:
    filename = filename[:90]

    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r"(?:\r\n)?[ \t]+")
def _normalize_headers(headers):
return dict([(key.lower(), NORMALIZE_SPACE.sub(value, " ").strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if "cache-control" in headers:
parts = headers["cache-control"].split(",")
parts_with_args = [
tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")
]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# TODO: add current time as _entry_disposition argument to avoid sleep in tests
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    Returns one of "FRESH" (serve from cache), "STALE" (revalidate) or
    "TRANSPARENT" (bypass the cache entirely).

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Not that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh
    """

    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if "pragma" in request_headers and request_headers["pragma"].lower().find("no-cache") != -1:
        retval = "TRANSPARENT"
        if "cache-control" not in request_headers:
            request_headers["cache-control"] = "no-cache"
    elif "no-cache" in cc:
        retval = "TRANSPARENT"
    elif "no-cache" in cc_response:
        retval = "STALE"
    elif "only-if-cached" in cc:
        retval = "FRESH"
    elif "date" in response_headers:
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers["date"]))
        now = time.time()
        current_age = max(0, now - date)
        if "max-age" in cc_response:
            # A non-numeric max-age is treated as already expired.
            try:
                freshness_lifetime = int(cc_response["max-age"])
            except ValueError:
                freshness_lifetime = 0
        elif "expires" in response_headers:
            expires = email.Utils.parsedate_tz(response_headers["expires"])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        if "max-age" in cc:
            # The request's own max-age overrides the response's lifetime.
            try:
                freshness_lifetime = int(cc["max-age"])
            except ValueError:
                freshness_lifetime = 0
        if "min-fresh" in cc:
            try:
                min_fresh = int(cc["min-fresh"])
            except ValueError:
                min_fresh = 0
            # min-fresh effectively ages the entry by the requested margin.
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
    """Undo a gzip/deflate content-encoding on a response body.

    Returns the decoded body and rewrites the response headers so the
    historical encoding is recorded under '-content-encoding'. Raises
    FailedToDecompressContent when the body does not actually decode.
    """
    body = new_content
    try:
        encoding = response.get("content-encoding", None)
        if encoding in ["gzip", "deflate"]:
            if encoding == "gzip":
                body = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
            if encoding == "deflate":
                body = zlib.decompress(body, -zlib.MAX_WBITS)
            response["content-length"] = str(len(body))
            # Record the historical presence of the encoding in a way that
            # won't interfere with downstream header processing.
            response["-content-encoding"] = response["content-encoding"]
            del response["content-encoding"]
    except (IOError, zlib.error):
        body = ""
        raise FailedToDecompressContent(
            _("Content purported to be compressed with %s but failed to decompress.") % response.get("content-encoding"),
            response,
            body,
        )
    return body
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or purge) a response in the cache under `cachekey`.

    Honors 'no-store' from either side by deleting the entry; otherwise
    serializes status line, headers and body into one cache entry,
    annotating which request headers the response varied on.
    """
    if not cachekey:
        return
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)
    if "no-store" in cc or "no-store" in cc_response:
        cache.delete(cachekey)
        return
    info = email.Message.Message()
    for key, value in response_headers.iteritems():
        if key not in ["status", "content-encoding", "transfer-encoding"]:
            info[key] = value

    # Annotate the cached entry with the request headers this response
    # varied on, so a later request can be matched against them.
    vary = response_headers.get("vary", None)
    if vary:
        for header in vary.lower().replace(" ", "").split(","):
            varied_key = "-varied-%s" % header
            try:
                info[varied_key] = request_headers[header]
            except KeyError:
                pass

    status = response_headers.status
    if status == 304:
        # A 304 revalidation means the cached 200 entry is still good.
        status = 200

    status_header = "status: %d\r\n" % status
    header_str = info.as_string()
    # Normalize bare CR or bare LF into CRLF line endings.
    header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
    cache.set(cachekey, "".join([status_header, header_str, content]))
def _cnonce():
    """Generate a 16-hex-character client nonce for Digest/WSSE auth.

    Not cryptographically strong -- it hashes the wall clock plus 20
    pseudo-random digits -- but satisfies the cnonce contract of an
    opaque, unlikely-to-repeat string.
    """
    # Bug fix: random.randrange(0, 9) could never return index 9, so the
    # digit '9' was excluded from the nonce alphabet. random.choice over
    # the full digit string draws from all ten digits.
    digits = "".join(random.choice("0123456789") for _ in range(20))
    dig = _md5("%s:%s" % (time.ctime(), digits)).hexdigest()
    return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
    """Compute the WSSE PasswordDigest: Base64(SHA1(cnonce + created + password))."""
    digest = _sha("%s%s%s" % (cnonce, iso_now, password)).digest()
    return base64.b64encode(digest).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
    """Base class for a single authentication scope (host + path prefix).

    Subclasses implement `request` to decorate outgoing requests with the
    proper Authorization header, and may override `response` to react to
    server feedback (e.g. stale nonces).
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        (_scheme, _authority, path, _query, _fragment) = parse_uri(request_uri)
        self.path = path
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        """How many path levels below this scope's root the URI sits."""
        (_scheme, _authority, _path, _query, _fragment) = parse_uri(request_uri)
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        # XXX Should we normalize the request_uri?
        (_scheme, _authority, path, _query, _fragment) = parse_uri(request_uri)
        if host != self.host:
            return False
        return path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Over-ride this in sub-classes."""
        pass

    def response(self, response, content):
        """React to the response of an authorized request.

        Gives subclasses a chance to pick up new nonces or similar data
        returned by the server. Returns True when the request should be
        retried (e.g. Digest may return stale=true); the base
        implementation never retries.
        """
        return False
class BasicAuthentication(Authentication):
    """RFC 2617 Basic authentication: base64('user:password') every time."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Attach the Basic Authorization header to the outgoing request."""
        token = base64.b64encode("%s:%s" % self.credentials).strip()
        headers["authorization"] = "Basic " + token
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        # Pull the Digest challenge out of the WWW-Authenticate header.
        self.challenge = auth._parse_www_authenticate(response, "www-authenticate")["digest"]
        qop = self.challenge.get("qop", "auth")
        # Accept only qop='auth'; anything else (e.g. 'auth-int') maps to
        # None and is rejected just below.
        self.challenge["qop"] = ("auth" in [x.strip() for x in qop.split()]) and "auth" or None
        if self.challenge["qop"] is None:
            raise UnimplementedDigestAuthOptionError(_("Unsupported value for qop: %s." % qop))
        self.challenge["algorithm"] = self.challenge.get("algorithm", "MD5").upper()
        if self.challenge["algorithm"] != "MD5":
            raise UnimplementedDigestAuthOptionError(
                _("Unsupported value for algorithm: %s." % self.challenge["algorithm"])
            )
        # A1 = username:realm:password, per RFC 2617 section 3.2.2.2.
        self.A1 = "".join([self.credentials[0], ":", self.challenge["realm"], ":", self.credentials[1],])
        # Nonce-use counter: how many times the current server nonce was used.
        self.challenge["nc"] = 1

    def request(self, method, request_uri, headers, content, cnonce=None):
        """Modify the request headers"""
        # H and KD as defined by RFC 2617 section 3.2.1.
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        # A2 = method:digest-uri (the qop='auth' variant).
        A2 = "".join([method, ":", request_uri])
        self.challenge["cnonce"] = cnonce or _cnonce()
        # request-digest = KD(H(A1), nonce:nc:cnonce:qop:H(A2)).
        request_digest = '"%s"' % KD(
            H(self.A1),
            "%s:%s:%s:%s:%s"
            % (
                self.challenge["nonce"],
                "%08x" % self.challenge["nc"],
                self.challenge["cnonce"],
                self.challenge["qop"],
                H(A2),
            ),
        )
        headers["authorization"] = (
            'Digest username="%s", realm="%s", nonce="%s", '
            'uri="%s", algorithm=%s, response=%s, qop=%s, '
            'nc=%08x, cnonce="%s"'
        ) % (
            self.credentials[0],
            self.challenge["realm"],
            self.challenge["nonce"],
            request_uri,
            self.challenge["algorithm"],
            request_digest,
            self.challenge["qop"],
            self.challenge["nc"],
            self.challenge["cnonce"],
        )
        if self.challenge.get("opaque"):
            # Echo the server's opaque value back verbatim, as required.
            headers["authorization"] += ', opaque="%s"' % self.challenge["opaque"]
        # Bump the nonce-use count for the next request on this scope.
        self.challenge["nc"] += 1

    def response(self, response, content):
        """Handle nonce updates; return True when the request must be retried."""
        if "authentication-info" not in response:
            challenge = auth._parse_www_authenticate(response, "www-authenticate").get("digest", {})
            if "true" == challenge.get("stale"):
                # Server declared our nonce stale: adopt the new one and retry.
                self.challenge["nonce"] = challenge["nonce"]
                self.challenge["nc"] = 1
                return True
        else:
            updated_challenge = auth._parse_authentication_info(response, "authentication-info")

            if "nextnonce" in updated_challenge:
                # Server pre-announced the next nonce; no retry necessary.
                self.challenge["nonce"] = updated_challenge["nextnonce"]
                self.challenge["nc"] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""

    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = auth._parse_www_authenticate(response, "www-authenticate")
        self.challenge = challenge["hmacdigest"]
        # TODO: self.challenge['domain']
        # Normalize the failure reason; anything unknown is treated as a
        # plain authorization failure.
        self.challenge["reason"] = self.challenge.get("reason", "unauthorized")
        if self.challenge["reason"] not in ["unauthorized", "integrity"]:
            self.challenge["reason"] = "unauthorized"
        self.challenge["salt"] = self.challenge.get("salt", "")
        # A server nonce is mandatory for this scheme.
        if not self.challenge.get("snonce"):
            raise UnimplementedHmacDigestAuthOptionError(
                _("The challenge doesn't contain a server nonce, or this one is empty.")
            )
        self.challenge["algorithm"] = self.challenge.get("algorithm", "HMAC-SHA-1")
        if self.challenge["algorithm"] not in ["HMAC-SHA-1", "HMAC-MD5"]:
            raise UnimplementedHmacDigestAuthOptionError(
                _("Unsupported value for algorithm: %s." % self.challenge["algorithm"])
            )
        self.challenge["pw-algorithm"] = self.challenge.get("pw-algorithm", "SHA-1")
        if self.challenge["pw-algorithm"] not in ["SHA-1", "MD5"]:
            raise UnimplementedHmacDigestAuthOptionError(
                _("Unsupported value for pw-algorithm: %s." % self.challenge["pw-algorithm"])
            )
        # Hash module used for the HMAC request digest...
        if self.challenge["algorithm"] == "HMAC-MD5":
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        # ...and the (possibly different) one used to pre-hash the password.
        if self.challenge["pw-algorithm"] == "MD5":
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # key = H(username ":" H(password + salt) ":" realm), computed once
        # per challenge and reused for every request in this scope.
        self.key = "".join(
            [
                self.credentials[0],
                ":",
                self.pwhashmod.new("".join([self.credentials[1], self.challenge["salt"]])).hexdigest().lower(),
                ":",
                self.challenge["realm"],
            ]
        )
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        # The digest covers every end-to-end header, in order; the header
        # names themselves are echoed in the 'headers' auth parameter.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge["snonce"], headers_val,)
        request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers["authorization"] = (
            'HMACDigest username="%s", realm="%s", snonce="%s",'
            ' cnonce="%s", uri="%s", created="%s", '
            'response="%s", headers="%s"'
        ) % (
            self.credentials[0],
            self.challenge["realm"],
            self.challenge["snonce"],
            cnonce,
            request_uri,
            created,
            request_digest,
            keylist,
        )

    def response(self, response, content):
        """Return True (retry) when the server reports integrity/stale failure."""
        challenge = auth._parse_www_authenticate(response, "www-authenticate").get("hmacdigest", {})
        if challenge.get("reason") in ["integrity", "stale"]:
            return True
        return False
class WsseAuthentication(Authentication):
    """This is thinly tested and should not be relied upon.
    At this time there isn't any third party server to test against.
    Blogger and TypePad implemented this algorithm at one point
    but Blogger has since switched to Basic over HTTPS and
    TypePad has implemented it wrong, by never issuing a 401
    challenge but instead requiring your client to telepathically know that
    their endpoint is expecting WSSE profile="UsernameToken"."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Attach the WSSE Authorization and X-WSSE headers."""
        headers["authorization"] = 'WSSE profile="UsernameToken"'
        created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        nonce = _cnonce()
        digest = _wsse_username_token(nonce, created, self.credentials[1])
        headers["X-WSSE"] = ('UsernameToken Username="%s", PasswordDigest="%s", ' 'Nonce="%s", Created="%s"') % (
            self.credentials[0],
            digest,
            nonce,
            created,
        )
class GoogleLoginAuthentication(Authentication):
    """Google ClientLogin ('GoogleLogin') authentication.

    Performs the ClientLogin handshake once at construction time (an HTTP
    POST to Google's accounts endpoint) and replays the resulting Auth
    token on every subsequent request. A 403 during the handshake leaves
    self.Auth empty.
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode

        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = auth._parse_www_authenticate(response, "www-authenticate")
        service = challenge["googlelogin"].get("service", "xapi")
        # Blogger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == "xapi" and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        # elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"

        # Bug fix: this dict was previously bound to a local named `auth`,
        # shadowing the module-level `auth` helper used above in this very
        # method; a distinct name removes the shadowing hazard.
        login_params = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers["user-agent"],)
        resp, content = self.http.request(
            "https://www.google.com/accounts/ClientLogin",
            method="POST",
            body=urlencode(login_params),
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        lines = content.split("\n")
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            self.Auth = ""
        else:
            self.Auth = d["Auth"]

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers["authorization"] = "GoogleLogin Auth=" + self.Auth
# Map each supported WWW-Authenticate scheme name (lowercased) to the
# Authentication subclass that implements it.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication,
}

# Preference order when a server offers several schemes: tried first-to-last.
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """

    def __init__(self, cache, safe=safename):  # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        """`cache` is the directory path; `safe` maps a key to a filename."""
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached bytes for `key`, or None if absent/unreadable."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # open() instead of the Python-2-only file() builtin; the
            # with-block also guarantees the handle is closed even if
            # read() raises partway (the old code leaked it).
            with open(cacheFullPath, "rb") as f:
                retval = f.read()
        except IOError:
            pass
        return retval

    def set(self, key, value):
        """Store `value` under `key`, overwriting any previous entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        with open(cacheFullPath, "wb") as f:
            f.write(value)

    def delete(self, key):
        """Remove the entry for `key`, if one exists."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """A pool of (domain, name, password) triples for HTTP authentication.

    An empty domain matches every host; otherwise the domain must equal
    the queried one (domains are lower-cased when added).
    """

    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register a name/password pair, optionally scoped to a domain."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every registered credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield the (name, password) pairs applicable to `domain`."""
        for entry in self.credentials:
            if entry[0] in ("", domain):
                yield entry[1:]
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert."""

    def add(self, key, cert, domain, password):
        """Register a client key/cert (plus key password) for a domain."""
        self.credentials.append((domain.lower(), key, cert, password))

    def iter(self, domain):
        """Yield the (key, cert, password) triples applicable to `domain`."""
        for entry in self.credentials:
            if entry[0] in ("", domain):
                yield entry[1:]
class AllHosts(object):
    # Sentinel assigned to ProxyInfo.bypass_hosts to mean "bypass the
    # proxy for every host" (used when no_proxy is '*').
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""

    # Hosts excluded from proxying: a sequence of names/suffixes, or the
    # AllHosts sentinel meaning "bypass everything".
    bypass_hosts = ()

    def __init__(
        self, proxy_type, proxy_host, proxy_port, proxy_rdns=True, proxy_user=None, proxy_pass=None, proxy_headers=None,
    ):
        """Args:

          proxy_type: The type of proxy server.  This must be set to one of
          socks.PROXY_TYPE_XXX constants.  For example:  p =
          ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost',
          proxy_port=8000)
          proxy_host: The hostname or IP address of the proxy server.
          proxy_port: The port that the proxy server is running on.
          proxy_rdns: If True (default), DNS queries will not be performed
          locally, and instead, handed to the proxy to resolve.  This is useful
          if the network does not allow resolution of non-local names. In
          httplib2 0.9 and earlier, this defaulted to False.
          proxy_user: The username used to authenticate with the proxy server.
          proxy_pass: The password used to authenticate with the proxy server.
          proxy_headers: Additional or modified headers for the proxy connect
          request.
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass
        self.proxy_headers = proxy_headers

    def astuple(self):
        """Return all settings as a tuple, in socksocket.setproxy order."""
        return (
            self.proxy_type,
            self.proxy_host,
            self.proxy_port,
            self.proxy_rdns,
            self.proxy_user,
            self.proxy_pass,
            self.proxy_headers,
        )

    def isgood(self):
        """True when both a proxy host and port have been configured."""
        # Idiom fix: identity comparison against None instead of '!= None'.
        return (self.proxy_host is not None) and (self.proxy_port is not None)

    def applies_to(self, hostname):
        """True when requests to `hostname` should go through the proxy."""
        return not self.bypass_host(hostname)

    def bypass_host(self, hostname):
        """Has this host been excluded from the proxy config"""
        if self.bypass_hosts is AllHosts:
            return True

        hostname = "." + hostname.lstrip(".")
        for skip_name in self.bypass_hosts:
            # *.suffix
            if skip_name.startswith(".") and hostname.endswith(skip_name):
                return True
            # exact match
            if hostname == "." + skip_name:
                return True
        return False

    def __repr__(self):
        return (
            "<ProxyInfo type={p.proxy_type} "
            "host:port={p.proxy_host}:{p.proxy_port} rdns={p.proxy_rdns}"
            + " user={p.proxy_user} headers={p.proxy_headers}>"
        ).format(p=self)
def proxy_info_from_environment(method="http"):
    """Read proxy info from the environment variables.
    """
    if method not in ("http", "https"):
        return None

    env_var = "%s_proxy" % method
    # Lowercase variable wins; fall back to the uppercase form.
    url = os.environ.get(env_var, os.environ.get(env_var.upper()))
    if not url:
        return None
    return proxy_info_from_url(url, method, None)
def proxy_info_from_url(url, method="http", noproxy=None):
    """Construct a ProxyInfo from a URL (such as http_proxy env var)
    """
    url = urlparse.urlparse(url)
    username = None
    password = None
    port = None
    if "@" in url[1]:
        # Credentials embedded in the netloc: user:pass@host:port
        ident, host_port = url[1].split("@", 1)
        if ":" in ident:
            username, password = ident.split(":", 1)
        else:
            # NOTE(review): a lone token before '@' is treated as the
            # password (username stays None) -- confirm this is intended.
            password = ident
    else:
        host_port = url[1]
    if ":" in host_port:
        host, port = host_port.split(":", 1)
    else:
        host = host_port

    if port:
        port = int(port)
    else:
        # No explicit port in the URL: default by scheme.
        port = dict(https=443, http=80)[method]

    proxy_type = 3  # socks.PROXY_TYPE_HTTP
    pi = ProxyInfo(
        proxy_type=proxy_type,
        proxy_host=host,
        proxy_port=port,
        proxy_user=username or None,
        proxy_pass=password or None,
        proxy_headers=None,
    )

    bypass_hosts = []
    # If not given an explicit noproxy value, respect values in env vars.
    if noproxy is None:
        noproxy = os.environ.get("no_proxy", os.environ.get("NO_PROXY", ""))
    # Special case: A single '*' character means all hosts should be bypassed.
    if noproxy == "*":
        bypass_hosts = AllHosts
    elif noproxy.strip():
        bypass_hosts = noproxy.split(",")
        bypass_hosts = filter(bool, bypass_hosts)  # To exclude empty string.

    pi.bypass_hosts = bypass_hosts
    return pi
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """HTTPConnection subclass that supports timeouts
    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError("Proxy support missing but proxy use was requested!")
        if self.proxy_info and self.proxy_info.isgood():
            use_proxy = True
            (
                proxy_type,
                proxy_host,
                proxy_port,
                proxy_rdns,
                proxy_user,
                proxy_pass,
                proxy_headers,
            ) = self.proxy_info.astuple()

            # Connect to the proxy; the target host/port is handed to the
            # socks layer below.
            host = proxy_host
            port = proxy_port
        else:
            use_proxy = False

            host = self.host
            port = self.port

        socket_err = None

        # Try each address returned by getaddrinfo until one connects.
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if use_proxy:
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(
                        proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,
                    )
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                # End of difference from httplib.
                if self.debuglevel > 0:
                    print("connect: (%s, %s) ************" % (self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: %s ************"
                            % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
                        )
                if use_proxy:
                    self.sock.connect((self.host, self.port) + sa[2:])
                else:
                    self.sock.connect(sa)
            except socket.error as e:
                # Remember the failure; a later address may still succeed.
                socket_err = e
                if self.debuglevel > 0:
                    print("connect fail: (%s, %s)" % (self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: %s"
                            % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
                        )
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket_err or socket.error("getaddrinfo returns an empty list")
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
    """This class allows communication via SSL.
    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(
        self,
        host,
        port=None,
        key_file=None,
        cert_file=None,
        strict=None,
        timeout=None,
        proxy_info=None,
        ca_certs=None,
        disable_ssl_certificate_validation=False,
        ssl_version=None,
        key_password=None,
    ):
        if key_password:
            # A password-protected client key must be loaded through the
            # SSLContext; the base class key_file/cert_file path cannot
            # carry a passphrase.
            httplib.HTTPSConnection.__init__(self, host, port=port, strict=strict)
            self._context.load_cert_chain(cert_file, key_file, key_password)
            self.key_file = key_file
            self.cert_file = cert_file
            self.key_password = key_password
        else:
            httplib.HTTPSConnection.__init__(
                self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict
            )
            self.key_password = None
        self.timeout = timeout
        self.proxy_info = proxy_info
        if ca_certs is None:
            # Fall back to the CA bundle shipped with the library.
            ca_certs = CA_CERTS
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
        self.ssl_version = ssl_version

    # The following two methods were adapted from https_wrapper.py, released
    # with the Google Appengine SDK at
    # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
    # under the following license:
    #
    # Copyright 2007 Google Inc.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    #

    def _GetValidHostsForCert(self, cert):
        """Returns a list of valid host globs for an SSL certificate.
        Args:
            cert: A dictionary representing an SSL certificate.
        Returns:
            list: A list of valid host globs.
        """
        if "subjectAltName" in cert:
            # Prefer DNS entries from subjectAltName when present.
            return [x[1] for x in cert["subjectAltName"] if x[0].lower() == "dns"]
        else:
            # Fall back to the subject's commonName fields.
            return [x[0][1] for x in cert["subject"] if x[0][0].lower() == "commonname"]

    def _ValidateCertificateHostname(self, cert, hostname):
        """Validates that a given hostname is valid for an SSL certificate.
        Args:
            cert: A dictionary representing an SSL certificate.
            hostname: The hostname to test.
        Returns:
            bool: Whether or not the hostname is valid for this certificate.
        """
        hosts = self._GetValidHostsForCert(cert)
        for host in hosts:
            # Turn the certificate glob into a regex: dots become literal,
            # '*' matches a single label (anything but a dot).
            host_re = host.replace(".", "\.").replace("*", "[^.]*")
            if re.search("^%s$" % (host_re,), hostname, re.I):
                return True
        return False

    def connect(self):
        "Connect to a host on a given (SSL) port."
        if self.proxy_info and self.proxy_info.isgood():
            use_proxy = True
            (
                proxy_type,
                proxy_host,
                proxy_port,
                proxy_rdns,
                proxy_user,
                proxy_pass,
                proxy_headers,
            ) = self.proxy_info.astuple()

            host = proxy_host
            port = proxy_port
        else:
            use_proxy = False

            host = self.host
            port = self.port

        socket_err = None

        # Try each resolved address until one yields a working TLS socket.
        address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
        for family, socktype, proto, canonname, sockaddr in address_info:
            try:
                if use_proxy:
                    sock = socks.socksocket(family, socktype, proto)

                    sock.setproxy(
                        proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,
                    )
                else:
                    sock = socket.socket(family, socktype, proto)
                    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                if has_timeout(self.timeout):
                    sock.settimeout(self.timeout)
                if use_proxy:
                    # NOTE(review): this appends sockaddr[:2] while the plain
                    # HTTP class appends sa[2:] in the analogous branch --
                    # confirm which form the socks layer actually expects.
                    sock.connect((self.host, self.port) + sockaddr[:2])
                else:
                    sock.connect(sockaddr)
                self.sock = _ssl_wrap_socket(
                    sock,
                    self.key_file,
                    self.cert_file,
                    self.disable_ssl_certificate_validation,
                    self.ca_certs,
                    self.ssl_version,
                    self.host,
                    self.key_password,
                )
                if self.debuglevel > 0:
                    print("connect: (%s, %s)" % (self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: %s"
                            % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
                        )
                if not self.disable_ssl_certificate_validation:
                    cert = self.sock.getpeercert()
                    # NOTE(review): split(":", 0) performs no split at all, so
                    # hostname keeps any ':port' suffix -- verify whether
                    # maxsplit=1 was intended here.
                    hostname = self.host.split(":", 0)[0]
                    if not self._ValidateCertificateHostname(cert, hostname):
                        raise CertificateHostnameMismatch(
                            "Server presented certificate that does not match " "host %s: %s" % (hostname, cert),
                            hostname,
                            cert,
                        )
            except (ssl_SSLError, ssl_CertificateError, CertificateHostnameMismatch,) as e:
                if sock:
                    sock.close()
                if self.sock:
                    self.sock.close()
                self.sock = None
                # Unfortunately the ssl module doesn't seem to provide any way
                # to get at more detailed error information, in particular
                # whether the error is due to certificate validation or
                # something else (such as SSL protocol mismatch).
                if getattr(e, "errno", None) == ssl.SSL_ERROR_SSL:
                    raise SSLHandshakeError(e)
                else:
                    raise
            except (socket.timeout, socket.gaierror):
                # Timeouts and DNS failures are not recoverable by trying
                # another address; propagate immediately.
                raise
            except socket.error as e:
                socket_err = e
                if self.debuglevel > 0:
                    print("connect fail: (%s, %s)" % (self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: %s"
                            % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers,))
                        )
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket_err or socket.error("getaddrinfo returns an empty list")
# Default connection class per URL scheme; may be replaced below with the
# App Engine variants when running under urlfetch.
SCHEME_TO_CONNECTION = {
    "http": HTTPConnectionWithTimeout,
    "https": HTTPSConnectionWithTimeout,
}
def _new_fixed_fetch(validate_certificate):
    """Return a urlfetch.fetch wrapper with certificate validation pinned."""

    def fixed_fetch(
        url, payload=None, method="GET", headers={}, allow_truncated=False, follow_redirects=True, deadline=None,
    ):
        # Forward every option to App Engine's fetch(), forcing the
        # captured validate_certificate setting.
        options = dict(
            payload=payload,
            method=method,
            headers=headers,
            allow_truncated=allow_truncated,
            follow_redirects=follow_redirects,
            deadline=deadline,
            validate_certificate=validate_certificate,
        )
        return fetch(url, **options)

    return fixed_fetch
class AppEngineHttpConnection(httplib.HTTPConnection):
    """Use httplib on App Engine, but compensate for its weirdness.
    The parameters key_file, cert_file, proxy_info, ca_certs,
    disable_ssl_certificate_validation, and ssl_version are all dropped on
    the ground.
    """

    def __init__(
        self,
        host,
        port=None,
        key_file=None,
        cert_file=None,
        strict=None,
        timeout=None,
        proxy_info=None,
        ca_certs=None,
        disable_ssl_certificate_validation=False,
        ssl_version=None,
    ):
        # Only host/port/strict/timeout are meaningful under urlfetch; the
        # TLS- and proxy-related parameters exist solely for signature
        # compatibility with the socket-based connection classes.
        httplib.HTTPConnection.__init__(self, host, port=port, strict=strict, timeout=timeout)
class AppEngineHttpsConnection(httplib.HTTPSConnection):
    """Same as AppEngineHttpConnection, but for HTTPS URIs.
    The parameters proxy_info, ca_certs, disable_ssl_certificate_validation,
    and ssl_version are all dropped on the ground.
    """

    def __init__(
        self,
        host,
        port=None,
        key_file=None,
        cert_file=None,
        strict=None,
        timeout=None,
        proxy_info=None,
        ca_certs=None,
        disable_ssl_certificate_validation=False,
        ssl_version=None,
        key_password=None,
    ):
        if key_password:
            # urlfetch offers no way to supply a key passphrase.
            raise NotSupportedOnThisPlatform("Certificate with password is not supported.")
        httplib.HTTPSConnection.__init__(
            self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict, timeout=timeout,
        )
        # Route requests through App Engine's urlfetch, honoring the
        # certificate-validation setting.
        self._fetch = _new_fixed_fetch(not disable_ssl_certificate_validation)
# Use a different connection object for Google App Engine Standard Environment.
def is_gae_instance():
    """True when running under the Google App Engine runtime, including
    the local development server and testutil environments."""
    server_software = os.environ.get("SERVER_SOFTWARE", "")
    gae_prefixes = ("Google App Engine/", "Development/", "testutil/")
    return server_software.startswith(gae_prefixes)
# Swap in the App Engine connection classes when running inside GAE with a
# configured urlfetch stub; on any other platform, fall through silently and
# keep the socket-based classes assigned above.
try:
    if not is_gae_instance():
        raise NotRunningAppEngineEnvironment()

    from google.appengine.api import apiproxy_stub_map

    # Without a urlfetch stub, App Engine networking is unusable here.
    if apiproxy_stub_map.apiproxy.GetStub("urlfetch") is None:
        raise ImportError

    from google.appengine.api.urlfetch import fetch

    # Update the connection classes to use the Google App Engine specific ones.
    SCHEME_TO_CONNECTION = {
        "http": AppEngineHttpConnection,
        "https": AppEngineHttpsConnection,
    }
except (ImportError, NotRunningAppEngineEnvironment):
    pass
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
    def __init__(
        self,
        cache=None,
        timeout=None,
        proxy_info=proxy_info_from_environment,
        ca_certs=None,
        disable_ssl_certificate_validation=False,
        ssl_version=None,
    ):
        """If 'cache' is a string then it is used as a directory name for
        a disk cache. Otherwise it must be an object that supports the
        same interface as FileCache.

        All timeouts are in seconds. If None is passed for timeout
        then Python's default timeout for sockets will be used. See
        for example the docs of socket.setdefaulttimeout():
        http://docs.python.org/library/socket.html#socket.setdefaulttimeout

        `proxy_info` may be:
          - a callable that takes the http scheme ('http' or 'https') and
            returns a ProxyInfo instance per request. By default, uses
            proxy_info_from_environment.
          - a ProxyInfo instance (static proxy config).
          - None (proxy disabled).

        ca_certs is the path of a file containing root CA certificates for SSL
        server certificate validation.  By default, a CA cert file bundled with
        httplib2 is used.

        If disable_ssl_certificate_validation is true, SSL cert validation will
        not be performed.

        By default, ssl.PROTOCOL_SSLv23 will be used for the ssl version.
        """
        self.proxy_info = proxy_info
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
        self.ssl_version = ssl_version

        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, basestring):
            self.cache = FileCache(cache)
        else:
            self.cache = cache

        # Name/password
        self.credentials = Credentials()

        # Key/cert
        self.certificates = KeyCerts()

        # authorization objects
        self.authorizations = []

        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True

        self.redirect_codes = REDIRECT_CODES

        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT", "PATCH"]

        self.safe_methods = list(SAFE_METHODS)

        # If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
        self.follow_all_redirects = False

        self.ignore_etag = False

        self.force_exception_to_status_code = False

        self.timeout = timeout

        # Keep Authorization: headers on a redirect.
        self.forward_authorization_headers = False
def close(self):
"""Close persistent connections, clear sensitive data.
Not thread-safe, requires external synchronization against concurrent requests.
"""
existing, self.connections = self.connections, {}
for _, c in existing.iteritems():
c.close()
self.certificates.clear()
self.clear_credentials()
def __getstate__(self):
state_dict = copy.copy(self.__dict__)
# In case request is augmented by some foreign object such as
# credentials which handle auth
if "request" in state_dict:
del state_dict["request"]
if "connections" in state_dict:
del state_dict["connections"]
return state_dict
def __setstate__(self, state):
self.__dict__.update(state)
self.connections = {}
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = auth._parse_www_authenticate(response, "www-authenticate")
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if scheme in challenges:
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain, password=None):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain, password)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
    def _conn_request(self, conn, request_uri, method, body, headers):
        """Send one request on `conn` and read the response.

        Transient transport failures are retried up to RETRIES times,
        reconnecting the socket between attempts.  Returns a tuple of
        (Response, content); for HEAD requests the body is not read and
        the connection is closed instead.
        """
        i = 0
        seen_bad_status_line = False
        while i < RETRIES:
            i += 1
            try:
                if hasattr(conn, "sock") and conn.sock is None:
                    # Socket was closed (or never opened): (re)connect lazily.
                    conn.connect()
                conn.request(method, request_uri, body, headers)
            except socket.timeout:
                raise
            except socket.gaierror:
                # DNS lookup failure: retrying the same host won't help.
                conn.close()
                raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except ssl_SSLError:
                conn.close()
                raise
            except socket.error as e:
                err = 0
                if hasattr(e, "args"):
                    err = getattr(e, "args")[0]
                else:
                    err = e.errno
                if err == errno.ECONNREFUSED:  # Connection refused
                    raise
                if err in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES:
                    continue  # retry on potentially transient socket errors
            except httplib.HTTPException:
                # Just because the server closed the connection doesn't apparently mean
                # that the server didn't send a response.
                if hasattr(conn, "sock") and conn.sock is None:
                    if i < RETRIES - 1:
                        conn.close()
                        conn.connect()
                        continue
                    else:
                        conn.close()
                        raise
                if i < RETRIES - 1:
                    conn.close()
                    conn.connect()
                    continue
            # The send may have "failed" and still produced a readable
            # response, so always attempt to read one here.
            try:
                response = conn.getresponse()
            except httplib.BadStatusLine:
                # If we get a BadStatusLine on the first try then that means
                # the connection just went stale, so retry regardless of the
                # number of RETRIES set.
                if not seen_bad_status_line and i == 1:
                    i = 0
                    seen_bad_status_line = True
                    conn.close()
                    conn.connect()
                    continue
                else:
                    conn.close()
                    raise
            except (socket.error, httplib.HTTPException):
                if i < RETRIES - 1:
                    conn.close()
                    conn.connect()
                    continue
                else:
                    conn.close()
                    raise
            else:
                content = ""
                if method == "HEAD":
                    # HEAD responses carry no body; close rather than read.
                    conn.close()
                else:
                    content = response.read()
                response = Response(response)
                if method != "HEAD":
                    # Transparently decode gzip/deflate per Content-Encoding.
                    content = _decompressContent(response, content)
            break
        return (response, content)
def _request(
self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey,
):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if self.follow_all_redirects or method in self.safe_methods or response.status in (303, 308):
if self.follow_redirects and response.status in self.redirect_codes:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if "location" not in response and response.status != 300:
raise RedirectMissingLocation(
_("Redirected but the response is missing a Location: header."), response, content,
)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if "location" in response:
location = response["location"]
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response["location"] = urlparse.urljoin(absolute_uri, location)
if response.status == 308 or (response.status == 301 and method in self.safe_methods):
response["-x-permanent-redirect-url"] = response["location"]
if "content-location" not in response:
response["content-location"] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if "if-none-match" in headers:
del headers["if-none-match"]
if "if-modified-since" in headers:
del headers["if-modified-since"]
if "authorization" in headers and not self.forward_authorization_headers:
del headers["authorization"]
if "location" in response:
location = response["location"]
old_response = copy.deepcopy(response)
if "content-location" not in old_response:
old_response["content-location"] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
redirect_method = "GET"
body = None
(response, content) = self.request(
location, method=redirect_method, body=body, headers=headers, redirections=redirections - 1,
)
response.previous = old_response
else:
raise RedirectLimit(
"Redirected more times than rediection_limit allows.", response, content,
)
elif response.status in [200, 203] and method in self.safe_methods:
# Don't cache 206's since we aren't going to handle byte range requests
if "content-location" not in response:
response["content-location"] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
    def _normalize_headers(self, headers):
        """Hook point for subclasses: delegate header-name normalization
        (lower-casing etc.) to the module-level helper of the same name."""
        return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
    def request(
        self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None,
    ):
        """Performs a single HTTP request.

        The 'uri' is the URI of the HTTP resource and can begin with either
        'http' or 'https'. The value of 'uri' must be an absolute URI.

        The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
        etc. There is no restriction on the methods allowed.

        The 'body' is the entity body to be sent with the request. It is a
        string object.

        Any extra headers that are to be sent with the request should be
        provided in the 'headers' dictionary.

        The maximum number of redirect to follow before raising an
        exception is 'redirections'. The default is 5.

        The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
        a string that contains the response entity body.
        """
        conn_key = ""

        try:
            if headers is None:
                headers = {}
            else:
                headers = self._normalize_headers(headers)

            if "user-agent" not in headers:
                headers["user-agent"] = "Python-httplib2/%s (gzip)" % __version__

            uri = iri2uri(uri)
            # Prevent CWE-75 space injection to manipulate request via part of uri.
            # Prevent CWE-93 CRLF injection to modify headers via part of uri.
            uri = uri.replace(" ", "%20").replace("\r", "%0D").replace("\n", "%0A")

            (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)

            proxy_info = self._get_proxy_info(scheme, authority)

            # Reuse (or create) one persistent connection per scheme+authority.
            conn_key = scheme + ":" + authority
            conn = self.connections.get(conn_key)
            if conn is None:
                if not connection_type:
                    connection_type = SCHEME_TO_CONNECTION[scheme]
                certs = list(self.certificates.iter(authority))
                if scheme == "https":
                    if certs:
                        # Client-certificate TLS connection.
                        conn = self.connections[conn_key] = connection_type(
                            authority,
                            key_file=certs[0][0],
                            cert_file=certs[0][1],
                            timeout=self.timeout,
                            proxy_info=proxy_info,
                            ca_certs=self.ca_certs,
                            disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
                            ssl_version=self.ssl_version,
                            key_password=certs[0][2],
                        )
                    else:
                        conn = self.connections[conn_key] = connection_type(
                            authority,
                            timeout=self.timeout,
                            proxy_info=proxy_info,
                            ca_certs=self.ca_certs,
                            disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
                            ssl_version=self.ssl_version,
                        )
                else:
                    conn = self.connections[conn_key] = connection_type(
                        authority, timeout=self.timeout, proxy_info=proxy_info
                    )
                conn.set_debuglevel(debuglevel)

            if "range" not in headers and "accept-encoding" not in headers:
                headers["accept-encoding"] = "gzip, deflate"

            info = email.Message.Message()
            cachekey = None
            cached_value = None
            if self.cache:
                cachekey = defrag_uri.encode("utf-8")
                cached_value = self.cache.get(cachekey)
                if cached_value:
                    # info = email.message_from_string(cached_value)
                    #
                    # Need to replace the line above with the kludge below
                    # to fix the non-existent bug not fixed in this
                    # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                    try:
                        # Cached entries are stored as "headers\r\n\r\nbody".
                        info, content = cached_value.split("\r\n\r\n", 1)
                        feedparser = email.FeedParser.FeedParser()
                        feedparser.feed(info)
                        info = feedparser.close()
                        feedparser._parse = None
                    except (IndexError, ValueError):
                        # Corrupt cache entry: discard it and carry on uncached.
                        self.cache.delete(cachekey)
                        cachekey = None
                        cached_value = None

            if (
                method in self.optimistic_concurrency_methods
                and self.cache
                and "etag" in info
                and not self.ignore_etag
                and "if-match" not in headers
            ):
                # http://www.w3.org/1999/04/Editing/
                headers["if-match"] = info["etag"]

            # https://tools.ietf.org/html/rfc7234
            # A cache MUST invalidate the effective Request URI as well as [...] Location and Content-Location
            # when a non-error status code is received in response to an unsafe request method.
            if self.cache and cachekey and method not in self.safe_methods:
                self.cache.delete(cachekey)

            # Check the vary header in the cache to see if this request
            # matches what varies in the cache.
            if method in self.safe_methods and "vary" in info:
                vary = info["vary"]
                vary_headers = vary.lower().replace(" ", "").split(",")
                for header in vary_headers:
                    key = "-varied-%s" % header
                    value = info[key]
                    if headers.get(header, None) != value:
                        # A varied header differs: the cached entry is unusable.
                        cached_value = None
                        break

            if (
                self.cache
                and cached_value
                and (method in self.safe_methods or info["status"] == "308")
                and "range" not in headers
            ):
                redirect_method = method
                if info["status"] not in ("307", "308"):
                    # Only 307/308 preserve the method on redirect.
                    redirect_method = "GET"
                if "-x-permanent-redirect-url" in info:
                    # Should cached permanent redirects be counted in our redirection count? For now, yes.
                    if redirections <= 0:
                        raise RedirectLimit(
                            "Redirected more times than rediection_limit allows.", {}, "",
                        )
                    (response, new_content) = self.request(
                        info["-x-permanent-redirect-url"],
                        method=redirect_method,
                        headers=headers,
                        redirections=redirections - 1,
                    )
                    response.previous = Response(info)
                    response.previous.fromcache = True
                else:
                    # Determine our course of action:
                    #   Is the cached entry fresh or stale?
                    #   Has the client requested a non-cached response?
                    #
                    # There seems to be three possible answers:
                    # 1. [FRESH] Return the cache entry w/o doing a GET
                    # 2. [STALE] Do the GET (but add in cache validators if available)
                    # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                    entry_disposition = _entry_disposition(info, headers)

                    if entry_disposition == "FRESH":
                        # Defensive: cached_value is truthy on this path, but
                        # keep the 504 fallback for a missing entry.
                        if not cached_value:
                            info["status"] = "504"
                            content = ""
                        response = Response(info)
                        if cached_value:
                            response.fromcache = True
                        return (response, content)

                    if entry_disposition == "STALE":
                        # Revalidate with whatever validators the entry carries.
                        if "etag" in info and not self.ignore_etag and not "if-none-match" in headers:
                            headers["if-none-match"] = info["etag"]
                        if "last-modified" in info and not "last-modified" in headers:
                            headers["if-modified-since"] = info["last-modified"]
                    elif entry_disposition == "TRANSPARENT":
                        pass

                    (response, new_content) = self._request(
                        conn, authority, uri, request_uri, method, body, headers, redirections, cachekey,
                    )

                    if response.status == 304 and method == "GET":
                        # Rewrite the cache entry with the new end-to-end headers
                        # Take all headers that are in response
                        # and overwrite their values in info.
                        # unless they are hop-by-hop, or are listed in the connection header.
                        for key in _get_end2end_headers(response):
                            info[key] = response[key]
                        merged_response = Response(info)
                        if hasattr(response, "_stale_digest"):
                            merged_response._stale_digest = response._stale_digest
                        _updateCache(headers, merged_response, content, self.cache, cachekey)
                        response = merged_response
                        response.status = 200
                        response.fromcache = True
                    elif response.status == 200:
                        content = new_content
                    else:
                        # Error response: the cached entry is no longer valid.
                        self.cache.delete(cachekey)
                        content = new_content
            else:
                cc = _parse_cache_control(headers)
                if "only-if-cached" in cc:
                    # Client insisted on a cached answer and we have none: 504.
                    info["status"] = "504"
                    response = Response(info)
                    content = ""
                else:
                    (response, content) = self._request(
                        conn, authority, uri, request_uri, method, body, headers, redirections, cachekey,
                    )
        except Exception as e:
            is_timeout = isinstance(e, socket.timeout)
            if is_timeout:
                # A timed-out connection is unusable: evict and close it.
                conn = self.connections.pop(conn_key, None)
                if conn:
                    conn.close()

            if self.force_exception_to_status_code:
                # Convert the failure into a synthetic HTTP response instead
                # of letting the exception propagate.
                if isinstance(e, HttpLib2ErrorWithResponse):
                    response = e.response
                    content = e.content
                    response.status = 500
                    response.reason = str(e)
                elif is_timeout:
                    content = "Request Timeout"
                    response = Response({"content-type": "text/plain", "status": "408", "content-length": len(content),})
                    response.reason = "Request Timeout"
                else:
                    content = str(e)
                    response = Response({"content-type": "text/plain", "status": "400", "content-length": len(content),})
                    response.reason = "Bad Request"
            else:
                raise

        return (response, content)
def _get_proxy_info(self, scheme, authority):
"""Return a ProxyInfo instance (or None) based on the scheme
and authority.
"""
hostname, port = urllib.splitport(authority)
proxy_info = self.proxy_info
if callable(proxy_info):
proxy_info = proxy_info(scheme)
if hasattr(proxy_info, "applies_to") and not proxy_info.applies_to(hostname):
proxy_info = None
return proxy_info
class Response(dict):
    """An object more like email.Message than httplib.HTTPResponse.

    Behaves as a dict of response headers (keys lower-cased) with a few
    HTTP attributes alongside.
    """

    # Is this response from our local cache?
    fromcache = False
    # HTTP protocol version used by server: 10 for HTTP/1.0, 11 for HTTP/1.1.
    version = 11
    # Status code returned by server.
    status = 200
    # Reason phrase returned by server.
    reason = "Ok"
    # The previous Response in a chain of followed redirects, if any.
    previous = None

    def __init__(self, info):
        """Populate from an httplib.HTTPResponse, an email.Message, or a
        plain mapping (e.g. headers recovered from the cache)."""
        if isinstance(info, httplib.HTTPResponse):
            for key, value in info.getheaders():
                self[key.lower()] = value
            self.status = info.status
            self["status"] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for key, value in info.items():
                self[key.lower()] = value
            self.status = int(self["status"])
        else:
            # Plain mapping.  Use .items() (not the Python-2-only
            # .iteritems() previously used here), matching the email branch
            # above and keeping this class usable under Python 3.
            for key, value in info.items():
                self[key.lower()] = value
            self.status = int(self.get("status", self.status))
            self.reason = self.get("reason", self.reason)

    def __getattr__(self, name):
        # Compatibility shim: code written against mimetools-style responses
        # may ask for `.dict`; answer with the mapping itself.
        if name == "dict":
            return self
        else:
            raise AttributeError(name)
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_1890_0 |
crossvul-python_data_bad_1890_1 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-400/py/bad_1890_1 |
crossvul-python_data_good_1916_1 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib.parse
from io import BytesIO
from typing import (
TYPE_CHECKING,
Any,
BinaryIO,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
import treq
from canonicaljson import encode_canonical_json
from netaddr import IPAddress, IPSet
from prometheus_client import Counter
from zope.interface import implementer, provider
from OpenSSL import SSL
from OpenSSL.SSL import VERIFY_NONE
from twisted.internet import defer, error as twisted_error, protocol, ssl
from twisted.internet.interfaces import (
IAddress,
IHostResolution,
IReactorPluggableNameResolver,
IResolutionReceiver,
)
from twisted.internet.task import Cooperator
from twisted.python.failure import Failure
from twisted.web._newclient import ResponseDone
from twisted.web.client import (
Agent,
HTTPConnectionPool,
ResponseNeverReceived,
readBody,
)
from twisted.web.http import PotentialDataLoss
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IResponse
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_uri
from synapse.http.proxyagent import ProxyAgent
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred
if TYPE_CHECKING:
from synapse.app.homeserver import HomeServer
logger = logging.getLogger(__name__)

# Prometheus metrics: outgoing requests labelled by method, incoming
# responses labelled by method and status code.
outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"])
incoming_responses_counter = Counter(
    "synapse_http_client_responses", "", ["method", "code"]
)

# the type of the headers list, to be passed to the t.w.h.Headers.
# Actually we can mix str and bytes keys, but Mapping treats 'key' as invariant so
# we simplify.
RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]]

# the value actually has to be a List, but List is invariant so we can't specify that
# the entries can either be Lists or bytes.
RawHeaderValue = Sequence[Union[str, bytes]]

# the type of the query params, to be passed into `urlencode`
QueryParamValue = Union[str, bytes, Iterable[Union[str, bytes]]]
QueryParams = Union[Mapping[str, QueryParamValue], Mapping[bytes, QueryParamValue]]
def check_against_blacklist(
    ip_address: IPAddress, ip_whitelist: Optional[IPSet], ip_blacklist: IPSet
) -> bool:
    """
    Compares an IP address to allowed and disallowed IP sets.

    Args:
        ip_address: The IP address to check
        ip_whitelist: Allowed IP addresses.
        ip_blacklist: Disallowed IP addresses.

    Returns:
        True if the IP address is in the blacklist and not in the whitelist.
    """
    blacklisted = ip_address in ip_blacklist
    whitelisted = ip_whitelist is not None and ip_address in ip_whitelist
    # The whitelist only rescues addresses that would otherwise be blocked.
    return blacklisted and not whitelisted
# Tiny delay used so each scheduled call yields control back to the reactor.
_EPSILON = 0.00000001


def _make_scheduler(reactor):
    """Build a scheduler callable for a Cooperator bound to *reactor*.

    (This is effectively just a copy from `twisted.internet.task`.)
    """
    return lambda call: reactor.callLater(_EPSILON, call)
class _IPBlacklistingResolver:
    """
    A proxy for reactor.nameResolver which only produces non-blacklisted IP
    addresses, preventing DNS rebinding attacks on URL preview.
    """

    def __init__(
        self,
        reactor: IReactorPluggableNameResolver,
        ip_whitelist: Optional[IPSet],
        ip_blacklist: IPSet,
    ):
        """
        Args:
            reactor: The twisted reactor.
            ip_whitelist: IP addresses to allow.
            ip_blacklist: IP addresses to disallow.
        """
        self._reactor = reactor
        self._ip_whitelist = ip_whitelist
        self._ip_blacklist = ip_blacklist

    def resolveHostName(
        self, recv: IResolutionReceiver, hostname: str, portNumber: int = 0
    ) -> IResolutionReceiver:
        # Instantiate the caller's receiver now; results are only forwarded
        # to it after the full address list has been vetted in _callback.
        r = recv()
        addresses = []  # type: List[IAddress]

        def _callback() -> None:
            # Runs once the underlying resolution is complete (see
            # EndpointReceiver.resolutionComplete below).
            r.resolutionBegan(None)

            has_bad_ip = False
            for i in addresses:
                ip_address = IPAddress(i.host)

                if check_against_blacklist(
                    ip_address, self._ip_whitelist, self._ip_blacklist
                ):
                    logger.info(
                        "Dropped %s from DNS resolution to %s due to blacklist"
                        % (ip_address, hostname)
                    )
                    has_bad_ip = True

            # if we have a blacklisted IP, we'd like to raise an error to block the
            # request, but all we can really do from here is claim that there were no
            # valid results.
            if not has_bad_ip:
                for i in addresses:
                    r.addressResolved(i)
            r.resolutionComplete()

        # Intermediate receiver that buffers all addresses so they can be
        # checked as a batch before any are released to the caller.
        @provider(IResolutionReceiver)
        class EndpointReceiver:
            @staticmethod
            def resolutionBegan(resolutionInProgress: IHostResolution) -> None:
                pass

            @staticmethod
            def addressResolved(address: IAddress) -> None:
                addresses.append(address)

            @staticmethod
            def resolutionComplete() -> None:
                _callback()

        self._reactor.nameResolver.resolveHostName(
            EndpointReceiver, hostname, portNumber=portNumber
        )

        return r
@implementer(IReactorPluggableNameResolver)
class BlacklistingReactorWrapper:
    """
    A Reactor wrapper which will prevent DNS resolution to blacklisted IP
    addresses, to prevent DNS rebinding.
    """

    def __init__(
        self,
        reactor: IReactorPluggableNameResolver,
        ip_whitelist: Optional[IPSet],
        ip_blacklist: IPSet,
    ):
        self._reactor = reactor

        # Substitute a DNS resolver that filters out blacklisted IP
        # addresses, preventing DNS rebinding.
        self._nameResolver = _IPBlacklistingResolver(
            reactor, ip_whitelist, ip_blacklist
        )

    def __getattr__(self, attr: str) -> Any:
        """Proxy everything to the wrapped reactor, except ``nameResolver``."""
        if attr == "nameResolver":
            return self._nameResolver
        return getattr(self._reactor, attr)
class BlacklistingAgentWrapper(Agent):
    """
    An Agent wrapper which will prevent access to IP addresses being accessed
    directly (without an IP address lookup).

    Note: this subclasses Agent for interface compatibility only — it never
    calls Agent.__init__ and forwards every request to the wrapped agent.
    """

    def __init__(
        self,
        agent: IAgent,
        ip_whitelist: Optional[IPSet] = None,
        ip_blacklist: Optional[IPSet] = None,
    ):
        """
        Args:
            agent: The Agent to wrap.
            ip_whitelist: IP addresses to allow.
            ip_blacklist: IP addresses to disallow.
        """
        self._agent = agent
        self._ip_whitelist = ip_whitelist
        self._ip_blacklist = ip_blacklist

    def request(
        self,
        method: bytes,
        uri: bytes,
        headers: Optional[Headers] = None,
        bodyProducer: Optional[IBodyProducer] = None,
    ) -> defer.Deferred:
        h = urllib.parse.urlparse(uri.decode("ascii"))

        try:
            # Only URIs whose host part is a literal IP can be checked here;
            # hostnames are handled by the blacklisting DNS resolver instead.
            ip_address = IPAddress(h.hostname)

            if check_against_blacklist(
                ip_address, self._ip_whitelist, self._ip_blacklist
            ):
                logger.info("Blocking access to %s due to blacklist" % (ip_address,))
                e = SynapseError(403, "IP address blocked by IP blacklist entry")
                return defer.fail(Failure(e))
        except Exception:
            # Not an IP
            pass

        return self._agent.request(
            method, uri, headers=headers, bodyProducer=bodyProducer
        )
class SimpleHttpClient:
"""
A simple, no-frills HTTP client with methods that wrap up common ways of
using HTTP in Matrix
"""
def __init__(
self,
hs: "HomeServer",
treq_args: Dict[str, Any] = {},
ip_whitelist: Optional[IPSet] = None,
ip_blacklist: Optional[IPSet] = None,
http_proxy: Optional[bytes] = None,
https_proxy: Optional[bytes] = None,
):
"""
Args:
hs
treq_args: Extra keyword arguments to be given to treq.request.
ip_blacklist: The IP addresses that are blacklisted that
we may not request.
ip_whitelist: The whitelisted IP addresses, that we can
request if it were otherwise caught in a blacklist.
http_proxy: proxy server to use for http connections. host[:port]
https_proxy: proxy server to use for https connections. host[:port]
"""
self.hs = hs
self._ip_whitelist = ip_whitelist
self._ip_blacklist = ip_blacklist
self._extra_treq_args = treq_args
self.user_agent = hs.version_string
self.clock = hs.get_clock()
if hs.config.user_agent_suffix:
self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix)
# We use this for our body producers to ensure that they use the correct
# reactor.
self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_reactor()))
self.user_agent = self.user_agent.encode("ascii")
if self._ip_blacklist:
# If we have an IP blacklist, we need to use a DNS resolver which
# filters out blacklisted IP addresses, to prevent DNS rebinding.
self.reactor = BlacklistingReactorWrapper(
hs.get_reactor(), self._ip_whitelist, self._ip_blacklist
)
else:
self.reactor = hs.get_reactor()
# the pusher makes lots of concurrent SSL connections to sygnal, and
# tends to do so in batches, so we need to allow the pool to keep
# lots of idle connections around.
pool = HTTPConnectionPool(self.reactor)
# XXX: The justification for using the cache factor here is that larger instances
# will need both more cache and more connections.
# Still, this should probably be a separate dial
pool.maxPersistentPerHost = max((100 * hs.config.caches.global_factor, 5))
pool.cachedConnectionTimeout = 2 * 60
self.agent = ProxyAgent(
self.reactor,
connectTimeout=15,
contextFactory=self.hs.get_http_client_context_factory(),
pool=pool,
http_proxy=http_proxy,
https_proxy=https_proxy,
)
if self._ip_blacklist:
# If we have an IP blacklist, we then install the blacklisting Agent
# which prevents direct access to IP addresses, that are not caught
# by the DNS resolution.
self.agent = BlacklistingAgentWrapper(
self.agent,
ip_whitelist=self._ip_whitelist,
ip_blacklist=self._ip_blacklist,
)
    async def request(
        self,
        method: str,
        uri: str,
        data: Optional[bytes] = None,
        headers: Optional[Headers] = None,
    ) -> IResponse:
        """Send a single HTTP request via the configured (blacklisting) agent.

        Args:
            method: HTTP method to use.
            uri: URI to query.
            data: Data to send in the request body, if applicable.
            headers: Request headers.

        Returns:
            Response object, once the headers have been read.

        Raises:
            RequestTimedOutError if the request times out before the headers are read
        """
        outgoing_requests_counter.labels(method).inc()

        # log request but strip `access_token` (AS requests for example include this)
        logger.debug("Sending request %s %s", method, redact_uri(uri))

        with start_active_span(
            "outgoing-client-request",
            tags={
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
                tags.HTTP_METHOD: method,
                tags.HTTP_URL: uri,
            },
            finish_on_close=True,
        ):
            try:
                body_producer = None
                if data is not None:
                    # Stream the body from memory via a producer bound to the
                    # right reactor (see self._cooperator).
                    body_producer = QuieterFileBodyProducer(
                        BytesIO(data), cooperator=self._cooperator,
                    )

                request_deferred = treq.request(
                    method,
                    uri,
                    agent=self.agent,
                    data=body_producer,
                    headers=headers,
                    **self._extra_treq_args,
                )  # type: defer.Deferred

                # we use our own timeout mechanism rather than treq's as a workaround
                # for https://twistedmatrix.com/trac/ticket/9534.
                request_deferred = timeout_deferred(
                    request_deferred, 60, self.hs.get_reactor(),
                )

                # turn timeouts into RequestTimedOutErrors
                request_deferred.addErrback(_timeout_to_request_timed_out_error)

                response = await make_deferred_yieldable(request_deferred)

                incoming_responses_counter.labels(method, response.code).inc()
                logger.info(
                    "Received response to %s %s: %s",
                    method,
                    redact_uri(uri),
                    response.code,
                )
                return response
            except Exception as e:
                incoming_responses_counter.labels(method, "ERR").inc()
                logger.info(
                    "Error sending request to %s %s: %s %s",
                    method,
                    redact_uri(uri),
                    type(e).__name__,
                    e.args[0],
                )
                set_tag(tags.ERROR, True)
                # NOTE(review): e.args[0] assumes the exception carries at
                # least one argument; an args-less exception would raise
                # IndexError here — confirm upstream failures always have args.
                set_tag("error_reason", e.args[0])
                raise
async def post_urlencoded_get_json(
self,
uri: str,
args: Optional[Mapping[str, Union[str, List[str]]]] = None,
headers: Optional[RawHeaders] = None,
) -> Any:
"""
Args:
uri: uri to query
args: parameters to be url-encoded in the body
headers: a map from header name to a list of values for that header
Returns:
parsed json
Raises:
RequestTimedOutError: if there is a timeout before the response headers
are received. Note there is currently no timeout on reading the response
body.
HttpResponseException: On a non-2xx HTTP response.
ValueError: if the response was not JSON
"""
# TODO: Do we ever want to log message contents?
logger.debug("post_urlencoded_get_json args: %s", args)
query_bytes = encode_query_args(args)
actual_headers = {
b"Content-Type": [b"application/x-www-form-urlencoded"],
b"User-Agent": [self.user_agent],
b"Accept": [b"application/json"],
}
if headers:
actual_headers.update(headers) # type: ignore
response = await self.request(
"POST", uri, headers=Headers(actual_headers), data=query_bytes
)
body = await make_deferred_yieldable(readBody(response))
if 200 <= response.code < 300:
return json_decoder.decode(body.decode("utf-8"))
else:
raise HttpResponseException(
response.code, response.phrase.decode("ascii", errors="replace"), body
)
async def post_json_get_json(
self, uri: str, post_json: Any, headers: Optional[RawHeaders] = None
) -> Any:
"""
Args:
uri: URI to query.
post_json: request body, to be encoded as json
headers: a map from header name to a list of values for that header
Returns:
parsed json
Raises:
RequestTimedOutError: if there is a timeout before the response headers
are received. Note there is currently no timeout on reading the response
body.
HttpResponseException: On a non-2xx HTTP response.
ValueError: if the response was not JSON
"""
json_str = encode_canonical_json(post_json)
logger.debug("HTTP POST %s -> %s", json_str, uri)
actual_headers = {
b"Content-Type": [b"application/json"],
b"User-Agent": [self.user_agent],
b"Accept": [b"application/json"],
}
if headers:
actual_headers.update(headers) # type: ignore
response = await self.request(
"POST", uri, headers=Headers(actual_headers), data=json_str
)
body = await make_deferred_yieldable(readBody(response))
if 200 <= response.code < 300:
return json_decoder.decode(body.decode("utf-8"))
else:
raise HttpResponseException(
response.code, response.phrase.decode("ascii", errors="replace"), body
)
async def get_json(
self,
uri: str,
args: Optional[QueryParams] = None,
headers: Optional[RawHeaders] = None,
) -> Any:
"""Gets some json from the given URI.
Args:
uri: The URI to request, not including query parameters
args: A dictionary used to create query string
headers: a map from header name to a list of values for that header
Returns:
Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON.
Raises:
RequestTimedOutError: if there is a timeout before the response headers
are received. Note there is currently no timeout on reading the response
body.
HttpResponseException On a non-2xx HTTP response.
ValueError: if the response was not JSON
"""
actual_headers = {b"Accept": [b"application/json"]}
if headers:
actual_headers.update(headers) # type: ignore
body = await self.get_raw(uri, args, headers=headers)
return json_decoder.decode(body.decode("utf-8"))
    async def put_json(
        self,
        uri: str,
        json_body: Any,
        args: Optional[QueryParams] = None,
        headers: Optional[RawHeaders] = None,
    ) -> Any:
        """Puts some json to the given URI.

        Args:
            uri: The URI to request, not including query parameters
            json_body: The JSON to put in the HTTP body,
            args: A dictionary used to create query strings
            headers: a map from header name to a list of values for that header

        Returns:
            Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON.

        Raises:
            RequestTimedOutError: if there is a timeout before the response headers
                are received. Note there is currently no timeout on reading the
                response body.
            HttpResponseException On a non-2xx HTTP response.
            ValueError: if the response was not JSON
        """
        if args:
            query_str = urllib.parse.urlencode(args, True)
            uri = "%s?%s" % (uri, query_str)

        json_str = encode_canonical_json(json_body)

        actual_headers = {
            b"Content-Type": [b"application/json"],
            b"User-Agent": [self.user_agent],
            b"Accept": [b"application/json"],
        }
        if headers:
            actual_headers.update(headers)  # type: ignore

        response = await self.request(
            "PUT", uri, headers=Headers(actual_headers), data=json_str
        )

        body = await make_deferred_yieldable(readBody(response))

        if 200 <= response.code < 300:
            return json_decoder.decode(body.decode("utf-8"))
        else:
            raise HttpResponseException(
                response.code, response.phrase.decode("ascii", errors="replace"), body
            )
async def get_raw(
self,
uri: str,
args: Optional[QueryParams] = None,
headers: Optional[RawHeaders] = None,
) -> bytes:
"""Gets raw text from the given URI.
Args:
uri: The URI to request, not including query parameters
args: A dictionary used to create query strings
headers: a map from header name to a list of values for that header
Returns:
Succeeds when we get a 2xx HTTP response, with the
HTTP body as bytes.
Raises:
RequestTimedOutError: if there is a timeout before the response headers
are received. Note there is currently no timeout on reading the response
body.
HttpResponseException on a non-2xx HTTP response.
"""
if args:
query_str = urllib.parse.urlencode(args, True)
uri = "%s?%s" % (uri, query_str)
actual_headers = {b"User-Agent": [self.user_agent]}
if headers:
actual_headers.update(headers) # type: ignore
response = await self.request("GET", uri, headers=Headers(actual_headers))
body = await make_deferred_yieldable(readBody(response))
if 200 <= response.code < 300:
return body
else:
raise HttpResponseException(
response.code, response.phrase.decode("ascii", errors="replace"), body
)
# XXX: FIXME: This is horribly copy-pasted from matrixfederationclient.
# The two should be factored out.
async def get_file(
    self,
    url: str,
    output_stream: BinaryIO,
    max_size: Optional[int] = None,
    headers: Optional[RawHeaders] = None,
) -> Tuple[int, Dict[bytes, List[bytes]], str, int]:
    """GETs a file from a given URL, streaming the body to `output_stream`.

    Args:
        url: The URL to GET
        output_stream: File to write the response body to.
        max_size: maximum number of body bytes to accept, or None for no limit
        headers: A map from header name to a list of values for that header

    Returns:
        A tuple of the file length, dict of the response
        headers, absolute URI of the response and HTTP response code.

    Raises:
        RequestTimedOutError: if there is a timeout before the response headers
            are received. Note there is currently no timeout on reading the response
            body.
        SynapseError: if the response is not a 2xx, the remote file is too large, or
            another exception happens during the download.
    """
    actual_headers = {b"User-Agent": [self.user_agent]}
    if headers:
        actual_headers.update(headers)  # type: ignore

    response = await self.request("GET", url, headers=Headers(actual_headers))

    resp_headers = dict(response.headers.getAllRawHeaders())

    # Fast-path rejection when the server declares an over-large body up front.
    if (
        b"Content-Length" in resp_headers
        and max_size
        and int(resp_headers[b"Content-Length"][0]) > max_size
    ):
        logger.warning("Requested URL is too large > %r bytes" % (max_size,))
        raise SynapseError(
            502,
            "Requested file is too large > %r bytes" % (max_size,),
            Codes.TOO_LARGE,
        )

    if response.code > 299:
        logger.warning("Got %d when downloading %s" % (response.code, url))
        raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN)

    # TODO: if our Content-Type is HTML or something, just read the first
    # N bytes into RAM rather than saving it all to disk only to read it
    # straight back in again

    try:
        length = await make_deferred_yieldable(
            read_body_with_max_size(response, output_stream, max_size)
        )
    except BodyExceededMaxSize:
        # BUG FIX: the SynapseError was previously constructed but never
        # raised, so an over-large download fell through with `length`
        # unbound. It must be raised to reject the oversized body.
        raise SynapseError(
            502,
            "Requested file is too large > %r bytes" % (max_size,),
            Codes.TOO_LARGE,
        )
    except Exception as e:
        raise SynapseError(502, ("Failed to download remote body: %s" % e)) from e

    return (
        length,
        resp_headers,
        response.request.absoluteURI.decode("ascii"),
        response.code,
    )
def _timeout_to_request_timed_out_error(f: Failure):
    """Translate twisted timeout failures into RequestTimedOutError.

    Any other failure is passed through unchanged.
    """
    # The TCP connection has its own timeout (set by the 'connectTimeout' param
    # on the Agent), which raises twisted_error.TimeoutError exception.
    if f.check(twisted_error.TimeoutError, twisted_error.ConnectingCancelledError):
        raise RequestTimedOutError("Timeout connecting to remote server")

    # This one means that we hit our overall timeout on the request.
    if f.check(defer.TimeoutError, ResponseNeverReceived):
        raise RequestTimedOutError("Timeout waiting for response from remote server")

    return f
class BodyExceededMaxSize(Exception):
    """The maximum allowed size of the HTTP body was exceeded.

    Delivered as a Deferred errback by `_ReadBodyWithMaxSizeProtocol` once the
    streamed body grows past the configured limit.
    """
class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
    """Twisted body-consumer protocol that streams a response body into a
    file-like object, aborting the connection once `max_size` bytes have been
    received.
    """

    def __init__(
        self, stream: BinaryIO, deferred: defer.Deferred, max_size: Optional[int]
    ):
        # Destination for the body bytes.
        self.stream = stream
        # Fired with the total length on success, or errbacked on failure.
        self.deferred = deferred
        # Running count of bytes received so far.
        self.length = 0
        # Byte limit, or None for unlimited.
        self.max_size = max_size

    def dataReceived(self, data: bytes) -> None:
        # NOTE(review): the chunk that pushes us over the limit is still
        # written to `stream` before we abort, so the output may contain
        # slightly more than `max_size` bytes — confirm callers discard it.
        self.stream.write(data)
        self.length += len(data)
        if self.max_size is not None and self.length >= self.max_size:
            self.deferred.errback(BodyExceededMaxSize())
            # Swap in a fresh Deferred so that connectionLost (triggered by
            # loseConnection below) does not fire the original one again.
            self.deferred = defer.Deferred()
            self.transport.loseConnection()

    def connectionLost(self, reason: Failure) -> None:
        # A clean end-of-body resolves with the number of bytes read.
        if reason.check(ResponseDone):
            self.deferred.callback(self.length)
        elif reason.check(PotentialDataLoss):
            # stolen from https://github.com/twisted/treq/pull/49/files
            # http://twistedmatrix.com/trac/ticket/4840
            self.deferred.callback(self.length)
        else:
            self.deferred.errback(reason)
def read_body_with_max_size(
    response: IResponse, stream: BinaryIO, max_size: Optional[int]
) -> defer.Deferred:
    """
    Read a HTTP response body to a file-object. Optionally enforcing a maximum file size.

    If the maximum file size is reached, the returned Deferred will resolve to a
    Failure with a BodyExceededMaxSize exception.

    Args:
        response: The HTTP response to read from.
        stream: The file-object to write to.
        max_size: The maximum file size to allow.

    Returns:
        A Deferred which resolves to the length of the read body.
    """
    result = defer.Deferred()
    consumer = _ReadBodyWithMaxSizeProtocol(stream, result, max_size)
    response.deliverBody(consumer)
    return result
def encode_query_args(args: Optional[Mapping[str, Union[str, List[str]]]]) -> bytes:
    """
    Encodes a map of query arguments to bytes which can be appended to a URL.

    Args:
        args: The query arguments, a mapping of string to string or list of strings.

    Returns:
        The query arguments encoded as bytes (empty if `args` is None).
    """
    if args is None:
        return b""

    byte_args = {}
    for name, value_or_values in args.items():
        # Normalise a bare string into a single-element list, then encode.
        if isinstance(value_or_values, str):
            value_or_values = [value_or_values]
        byte_args[name] = [value.encode("utf8") for value in value_or_values]

    return urllib.parse.urlencode(byte_args, True).encode("utf8")
class InsecureInterceptableContextFactory(ssl.ContextFactory):
    """
    Factory for PyOpenSSL SSL contexts which accepts any certificate for any domain.

    Do not use this since it allows an attacker to intercept your communications.
    """

    def __init__(self):
        self._context = SSL.Context(SSL.SSLv23_METHOD)
        # Disable certificate verification entirely: the verify callback
        # accepts everything unconditionally.
        self._context.set_verify(VERIFY_NONE, lambda *_: None)

    def getContext(self, hostname=None, port=None):
        # Same (insecure) context regardless of the target host/port.
        return self._context

    def creatorForNetloc(self, hostname, port):
        # Presumably the twisted IPolicyForHTTPS hook — reuses this factory
        # for every netloc; confirm against the Agent wiring.
        return self
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_1916_1 |
crossvul-python_data_good_4542_0 | """
This contains a bunch of RFC7230 definitions and regular expressions that are
needed to properly parse HTTP messages.
"""
import re
from .compat import tobytes
# RFC 7230 Section 3.2.3 whitespace: a single space or horizontal tab.
WS = "[ \t]"
# OWS = optional whitespace, RWS = required whitespace, BWS = "bad"
# whitespace (syntactically optional, semantically discouraged) — RFC 7230 §3.2.3.
OWS = WS + "{0,}?"
RWS = WS + "{1,}?"
BWS = OWS

# RFC 7230 Section 3.2.6 "Field Value Components":
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
#    / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
#    / DIGIT / ALPHA
# obs-text = %x80-FF
TCHAR = r"[!#$%&'*+\-.^_`|~0-9A-Za-z]"
OBS_TEXT = r"\x80-\xff"
TOKEN = TCHAR + "{1,}"

# RFC 5234 Appendix B.1 "Core Rules":
# VCHAR = %x21-7E
#    ; visible (printing) characters
VCHAR = r"\x21-\x7e"

# header-field = field-name ":" OWS field-value OWS
# field-name = token
# field-value = *( field-content / obs-fold )
# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar = VCHAR / obs-text

# Errata from: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189
# changes field-content to:
#
# field-content = field-vchar [ 1*( SP / HTAB / field-vchar )
#    field-vchar ]
FIELD_VCHAR = "[" + VCHAR + OBS_TEXT + "]"
# Field content is more greedy than the ABNF, in that it will match the whole value
FIELD_CONTENT = FIELD_VCHAR + "+(?:[ \t]+" + FIELD_VCHAR + "+)*"
# Which allows the field value here to just see if there is even a value in the first place
FIELD_VALUE = "(?:" + FIELD_CONTENT + ")?"

# Compiled matcher for a complete header line: "name: value", with optional
# surrounding whitespace around the value.
HEADER_FIELD = re.compile(
    tobytes(
        "^(?P<name>" + TOKEN + "):" + OWS + "(?P<value>" + FIELD_VALUE + ")" + OWS + "$"
    )
)
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_4542_0 |
crossvul-python_data_bad_1916_2 | # -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import time
from typing import Callable, Dict, Optional, Tuple
import attr
from twisted.internet import defer
from twisted.internet.interfaces import IReactorTime
from twisted.web.client import RedirectAgent, readBody
from twisted.web.http import stringToDatetime
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IResponse
from synapse.logging.context import make_deferred_yieldable
from synapse.util import Clock, json_decoder
from synapse.util.caches.ttlcache import TTLCache
from synapse.util.metrics import Measure
# All periods below appear to be in seconds (24 * 3600 == one day) — the
# TTLCache usage is consistent with that; confirm against TTLCache docs.

# period to cache .well-known results for by default
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600

# jitter factor to add to the .well-known default cache ttls
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 0.1

# period to cache failure to fetch .well-known for
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600

# period to cache failure to fetch .well-known if there has recently been a
# valid well-known for that domain.
WELL_KNOWN_DOWN_CACHE_PERIOD = 2 * 60

# period to remember there was a valid well-known after valid record expires
WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID = 2 * 3600

# cap for .well-known cache period
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600

# lower bound for .well-known cache period
WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60

# Attempt to refetch a cached well-known N% of the TTL before it expires.
# e.g. if set to 0.2 and we have a cached entry with a TTL of 5mins, then
# we'll start trying to refetch 1 minute before it expires.
WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2

# Number of times we retry fetching a well-known for a domain we know recently
# had a valid entry.
WELL_KNOWN_RETRY_ATTEMPTS = 3


logger = logging.getLogger(__name__)

# Module-level caches shared by WellKnownResolver instances, unless a
# resolver is constructed with its own caches (e.g. for tests).
_well_known_cache = TTLCache("well-known")
_had_valid_well_known_cache = TTLCache("had-valid-well-known")
@attr.s(slots=True, frozen=True)
class WellKnownLookupResult:
    # The server delegated to by the .well-known lookup, or None when the
    # lookup failed or returned no usable delegation.
    delegated_server = attr.ib()
class WellKnownResolver:
    """Handles well-known lookups for matrix servers.

    Results (including failures) are cached in TTL caches; a successful
    lookup is additionally remembered in a "had valid well-known" cache so
    that subsequent transient failures are treated more leniently.
    """

    def __init__(
        self,
        reactor: IReactorTime,
        agent: IAgent,
        user_agent: bytes,
        well_known_cache: Optional[TTLCache] = None,
        had_well_known_cache: Optional[TTLCache] = None,
    ):
        self._reactor = reactor
        self._clock = Clock(reactor)

        # Fall back to the module-level shared caches unless the caller
        # supplied its own (e.g. for tests).
        if well_known_cache is None:
            well_known_cache = _well_known_cache

        if had_well_known_cache is None:
            had_well_known_cache = _had_valid_well_known_cache

        self._well_known_cache = well_known_cache
        self._had_valid_well_known_cache = had_well_known_cache
        # Follow HTTP redirects when fetching the .well-known file.
        self._well_known_agent = RedirectAgent(agent)
        self.user_agent = user_agent

    async def get_well_known(self, server_name: bytes) -> WellKnownLookupResult:
        """Attempt to fetch and parse a .well-known file for the given server

        Args:
            server_name: name of the server, from the requested url

        Returns:
            The result of the lookup
        """
        try:
            prev_result, expiry, ttl = self._well_known_cache.get_with_expiry(
                server_name
            )

            now = self._clock.time()
            # Serve straight from cache unless we are inside the "grace
            # period" near the end of the entry's lifetime, in which case we
            # refetch early (falling back to the cached value on temporary
            # failure below).
            if now < expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl:
                return WellKnownLookupResult(delegated_server=prev_result)
        except KeyError:
            prev_result = None

        # TODO: should we linearise so that we don't end up doing two .well-known
        # requests for the same server in parallel?
        try:
            with Measure(self._clock, "get_well_known"):
                result, cache_period = await self._fetch_well_known(
                    server_name
                )  # type: Optional[bytes], float

        except _FetchWellKnownFailure as e:
            if prev_result and e.temporary:
                # This is a temporary failure and we have a still valid cached
                # result, so lets return that. Hopefully the next time we ask
                # the remote will be back up again.
                return WellKnownLookupResult(delegated_server=prev_result)

            result = None

            if self._had_valid_well_known_cache.get(server_name, False):
                # We have recently seen a valid well-known record for this
                # server, so we cache the lack of well-known for a shorter time.
                cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD
            else:
                cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD

            # add some randomness to the TTL to avoid a stampeding herd
            cache_period *= random.uniform(
                1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
                1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
            )

        # Note that failures are cached too (with result=None), so we do not
        # hammer a dead server.
        if cache_period > 0:
            self._well_known_cache.set(server_name, result, cache_period)

        return WellKnownLookupResult(delegated_server=result)

    async def _fetch_well_known(self, server_name: bytes) -> Tuple[bytes, float]:
        """Actually fetch and parse a .well-known, without checking the cache

        Args:
            server_name: name of the server, from the requested url

        Raises:
            _FetchWellKnownFailure if we fail to lookup a result

        Returns:
            The lookup result and cache period.
        """

        had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False)

        # We do this in two steps to differentiate between possibly transient
        # errors (e.g. can't connect to host, 503 response) and more permanent
        # errors (such as getting a 404 response).
        response, body = await self._make_well_known_request(
            server_name, retry=had_valid_well_known
        )

        try:
            if response.code != 200:
                raise Exception("Non-200 response %s" % (response.code,))

            parsed_body = json_decoder.decode(body.decode("utf-8"))
            logger.info("Response from .well-known: %s", parsed_body)

            result = parsed_body["m.server"].encode("ascii")
        except defer.CancelledError:
            # Bail if we've been cancelled
            raise
        except Exception as e:
            # Any parse failure (bad JSON, missing "m.server", non-200) is
            # treated as a permanent failure for caching purposes.
            logger.info("Error parsing well-known for %s: %s", server_name, e)
            raise _FetchWellKnownFailure(temporary=False)

        cache_period = _cache_period_from_headers(
            response.headers, time_now=self._reactor.seconds
        )
        if cache_period is None:
            cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
            # add some randomness to the TTL to avoid a stampeding herd every 24 hours
            # after startup
            cache_period *= random.uniform(
                1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
                1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
            )
        else:
            # Clamp a server-supplied period into our allowed range.
            cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
            cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)

        # We got a success, mark as such in the cache
        self._had_valid_well_known_cache.set(
            server_name,
            bool(result),
            cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID,
        )

        return result, cache_period

    async def _make_well_known_request(
        self, server_name: bytes, retry: bool
    ) -> Tuple[IResponse, bytes]:
        """Make the well known request.

        This will retry the request if requested and it fails (with unable
        to connect or receives a 5xx error).

        Args:
            server_name: name of the server, from the requested url
            retry: Whether to retry the request if it fails.

        Returns:
            Returns the response object and body. Response may be a non-200 response.
        """
        uri = b"https://%s/.well-known/matrix/server" % (server_name,)
        uri_str = uri.decode("ascii")

        headers = {
            b"User-Agent": [self.user_agent],
        }

        i = 0
        while True:
            i += 1

            logger.info("Fetching %s", uri_str)

            try:
                response = await make_deferred_yieldable(
                    self._well_known_agent.request(
                        b"GET", uri, headers=Headers(headers)
                    )
                )
                # NOTE(review): readBody buffers the entire response body in
                # memory with no size limit, so a hostile server can make us
                # allocate an arbitrarily large buffer — consider a capped
                # read (resource-exhaustion risk).
                body = await make_deferred_yieldable(readBody(response))

                if 500 <= response.code < 600:
                    raise Exception("Non-200 response %s" % (response.code,))

                return response, body
            except defer.CancelledError:
                # Bail if we've been cancelled
                raise
            except Exception as e:
                if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS:
                    logger.info("Error fetching %s: %s", uri_str, e)
                    raise _FetchWellKnownFailure(temporary=True)

                logger.info("Error fetching %s: %s. Retrying", uri_str, e)

            # Sleep briefly in the hopes that they come back up
            await self._clock.sleep(0.5)
def _cache_period_from_headers(
    headers: Headers, time_now: Callable[[], float] = time.time
) -> Optional[float]:
    """Work out how long a response may be cached for.

    Uses Cache-Control (no-store / max-age) first, falling back to the
    Expires header; returns None if neither gives an answer.
    """
    directives = _parse_cache_control(headers)

    if b"no-store" in directives:
        return 0

    max_age = directives.get(b"max-age")
    if max_age:
        try:
            return int(max_age)
        except ValueError:
            pass

    expires = headers.getRawHeaders(b"expires")
    if expires is not None:
        try:
            expires_date = stringToDatetime(expires[-1])
            return expires_date - time_now()
        except ValueError:
            # RFC7234 says 'A cache recipient MUST interpret invalid date formats,
            # especially the value "0", as representing a time in the past (i.e.,
            # "already expired").
            return 0

    return None
def _parse_cache_control(headers: Headers) -> Dict[bytes, Optional[bytes]]:
    """Parse all Cache-Control headers into a dict of directive -> value.

    Valueless directives (e.g. ``no-store``) map to None; directive names
    are lower-cased.
    """
    directives = {}  # type: Dict[bytes, Optional[bytes]]
    for header_value in headers.getRawHeaders(b"cache-control", []):
        for raw_directive in header_value.split(b","):
            parts = [piece.strip() for piece in raw_directive.split(b"=", 1)]
            name = parts[0].lower()
            directives[name] = parts[1] if len(parts) > 1 else None
    return directives
@attr.s(slots=True)
class _FetchWellKnownFailure(Exception):
    """Raised by `_fetch_well_known` / `_make_well_known_request` when the
    .well-known lookup fails."""

    # True if we didn't get a non-5xx HTTP response, i.e. this may or may not be
    # a temporary failure.
    temporary = attr.ib()
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_1916_2 |
crossvul-python_data_bad_4602_1 | from __future__ import absolute_import
import re
from collections import namedtuple
from ..exceptions import LocationParseError
from ..packages import six, rfc3986
from ..packages.rfc3986.exceptions import RFC3986Exception, ValidationError
from ..packages.rfc3986.validators import Validator
# Field names (and order) for the Url namedtuple defined below.
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']

# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ('http', 'https', None)

# Regex for detecting URLs with schemes. RFC 3986 Section 3.1
SCHEME_REGEX = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+\-]*:|/)")
class Url(namedtuple('Url', url_attrs)):
    """
    Data structure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`. Both the scheme and host are normalized as they are
    both case-insensitive according to RFC 3986.
    """
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Normalise on construction: paths always start with '/', schemes
        # are lower-cased.
        if path and not path.startswith('/'):
            path = '/' + path
        if scheme is not None:
            scheme = scheme.lower()
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        if self.query is None:
            return self.path or '/'
        return (self.path or '/') + '?' + self.query

    @property
    def netloc(self):
        """Network location including host and port"""
        if not self.port:
            return self.host
        return '%s:%d' % (self.host, self.port)

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ...     '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self

        # We use "is not None" as we want things to happen with empty
        # strings (or 0 port).
        pieces = []
        if scheme is not None:
            pieces.append(scheme + u'://')
        if auth is not None:
            pieces.append(auth + u'@')
        if host is not None:
            pieces.append(host)
        if port is not None:
            pieces.append(u':' + str(port))
        if path is not None:
            pieces.append(path)
        if query is not None:
            pieces.append(u'?' + query)
        if fragment is not None:
            pieces.append(u'#' + fragment)

        return u''.join(pieces)

    def __str__(self):
        return self.url
def split_first(s, delims):
    """
    .. deprecated:: 1.25

    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    best_idx = None
    best_delim = None
    for delim in delims:
        found_at = s.find(delim)
        if found_at >= 0 and (best_idx is None or found_at < best_idx):
            best_idx = found_at
            best_delim = delim

    # No delimiter present at all: whole input, empty remainder, no match.
    if best_idx is None:
        return s, '', None

    return s[:best_idx], s[best_idx + 1:], best_delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.
    This parser is RFC 3986 compliant.

    :param str url: URL to parse into a :class:`.Url` namedtuple.

    Partly backwards-compatible with :mod:`urlparse`.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """
    if not url:
        # Empty
        return Url()

    # Remember whether the input was text or bytes so the output parts can be
    # converted back to the caller's type at the end.
    is_string = not isinstance(url, six.binary_type)
    if not is_string:
        url = url.decode("utf-8")

    # RFC 3986 doesn't like URLs that have a host but don't start
    # with a scheme and we support URLs like that so we need to
    # detect that problem and add an empty scheme indication.
    # We don't get hurt on path-only URLs here as it's stripped
    # off and given an empty scheme anyways.
    if not SCHEME_REGEX.search(url):
        url = "//" + url

    try:
        iri_ref = rfc3986.IRIReference.from_string(url, encoding="utf-8")
    except (ValueError, RFC3986Exception):
        # raise_from(..., None) suppresses the rfc3986 exception context.
        six.raise_from(LocationParseError(url), None)

    def idna_encode(name):
        # Only hosts containing non-ASCII characters need IDNA encoding;
        # requires the optional 'idna' package.
        if name and any([ord(x) > 128 for x in name]):
            try:
                import idna
            except ImportError:
                raise LocationParseError("Unable to parse URL without the 'idna' module")
            try:
                return idna.encode(name.lower(), strict=True, std3_rules=True)
            except idna.IDNAError:
                raise LocationParseError(u"Name '%s' is not a valid IDNA label" % name)
        return name

    has_authority = iri_ref.authority is not None
    uri_ref = iri_ref.encode(idna_encoder=idna_encode)

    # rfc3986 strips the authority if it's invalid
    if has_authority and uri_ref.authority is None:
        raise LocationParseError(url)

    # Only normalize schemes we understand to not break http+unix
    # or other schemes that don't follow RFC 3986.
    if uri_ref.scheme is None or uri_ref.scheme.lower() in NORMALIZABLE_SCHEMES:
        uri_ref = uri_ref.normalize()

    # Validate all URIReference components and ensure that all
    # components that were set before are still set after
    # normalization has completed.
    validator = Validator()
    try:
        validator.check_validity_of(
            *validator.COMPONENT_NAMES
        ).validate(uri_ref)
    except ValidationError:
        six.raise_from(LocationParseError(url), None)

    # For the sake of backwards compatibility we put empty
    # string values for path if there are any defined values
    # beyond the path in the URL.
    # TODO: Remove this when we break backwards compatibility.
    path = uri_ref.path
    if not path:
        if (uri_ref.query is not None
                or uri_ref.fragment is not None):
            path = ""
        else:
            path = None

    # Ensure that each part of the URL is a `str` for
    # backwards compatibility.
    def to_input_type(x):
        if x is None:
            return None
        elif not is_string and not isinstance(x, six.binary_type):
            return x.encode('utf-8')
        return x

    return Url(
        scheme=to_input_type(uri_ref.scheme),
        auth=to_input_type(uri_ref.userinfo),
        host=to_input_type(uri_ref.host),
        port=int(uri_ref.port) if uri_ref.port is not None else None,
        path=to_input_type(path),
        query=to_input_type(uri_ref.query),
        fragment=to_input_type(uri_ref.fragment)
    )
def get_host(url):
    """
    Deprecated. Use :func:`parse_url` instead.
    """
    parsed = parse_url(url)
    return parsed.scheme or 'http', parsed.hostname, parsed.port
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_4602_1 |
crossvul-python_data_bad_1916_3 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import logging
import random
import sys
import urllib.parse
from io import BytesIO
from typing import Callable, Dict, List, Optional, Tuple, Union
import attr
import treq
from canonicaljson import encode_canonical_json
from prometheus_client import Counter
from signedjson.sign import sign_json
from twisted.internet import defer
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IReactorTime
from twisted.internet.task import _EPSILON, Cooperator
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer, IResponse
import synapse.metrics
import synapse.util.retryutils
from synapse.api.errors import (
FederationDeniedError,
HttpResponseException,
RequestSendFailed,
)
from synapse.http import QuieterFileBodyProducer
from synapse.http.client import (
BlacklistingAgentWrapper,
BlacklistingReactorWrapper,
encode_query_args,
readBodyToFile,
)
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import (
inject_active_span_byte_dict,
set_tag,
start_active_span,
tags,
)
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)

# Prometheus counters for federation requests sent and responses received.
outgoing_requests_counter = Counter(
    "synapse_http_matrixfederationclient_requests", "", ["method"]
)
incoming_responses_counter = Counter(
    "synapse_http_matrixfederationclient_responses", "", ["method", "code"]
)

# Attempt limits for the "long" and "short" retry algorithms used when
# sending federation requests (see the _send_request docstring).
MAX_LONG_RETRIES = 10
MAX_SHORT_RETRIES = 3
MAXINT = sys.maxsize


# Counter used to build per-request transaction ids for logging; wraps
# modulo (MAXINT - 1) in MatrixFederationRequest.__attrs_post_init__.
_next_id = 1


# Type alias for query arguments: name -> value or list of values.
QueryArgs = Dict[str, Union[str, List[str]]]
@attr.s(slots=True, frozen=True)
class MatrixFederationRequest:
    method = attr.ib(type=str)
    """HTTP method
    """

    path = attr.ib(type=str)
    """HTTP path
    """

    destination = attr.ib(type=str)
    """The remote server to send the HTTP request to.
    """

    json = attr.ib(default=None, type=Optional[JsonDict])
    """JSON to send in the body.
    """

    json_callback = attr.ib(default=None, type=Optional[Callable[[], JsonDict]])
    """A callback to generate the JSON.
    """

    query = attr.ib(default=None, type=Optional[dict])
    """Query arguments.
    """

    txn_id = attr.ib(default=None, type=Optional[str])
    """Unique ID for this request (for logging)
    """

    uri = attr.ib(init=False, type=bytes)
    """The URI of this request
    """

    def __attrs_post_init__(self) -> None:
        # Build a process-unique transaction id for log correlation,
        # wrapping the module-level counter to avoid unbounded growth.
        global _next_id
        txn_id = "%s-O-%s" % (self.method, _next_id)
        _next_id = (_next_id + 1) % (MAXINT - 1)

        # The class is frozen, so derived attributes must be filled in via
        # object.__setattr__.
        object.__setattr__(self, "txn_id", txn_id)

        destination_bytes = self.destination.encode("ascii")
        path_bytes = self.path.encode("ascii")
        if self.query:
            query_bytes = encode_query_args(self.query)
        else:
            query_bytes = b""

        # The object is frozen so we can pre-compute this.
        uri = urllib.parse.urlunparse(
            (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
        )
        object.__setattr__(self, "uri", uri)

    def get_json(self) -> Optional[JsonDict]:
        """Return the JSON body for this request, generating it via
        `json_callback` when one was supplied."""
        if self.json_callback:
            return self.json_callback()
        return self.json
async def _handle_json_response(
    reactor: IReactorTime,
    timeout_sec: float,
    request: MatrixFederationRequest,
    response: IResponse,
    start_ms: int,
) -> JsonDict:
    """
    Reads the JSON body of a response, with a timeout

    Args:
        reactor: twisted reactor, for the timeout
        timeout_sec: number of seconds to wait for response to complete
        request: the request that triggered the response
        response: response to the request
        start_ms: Timestamp when request was made

    Returns:
        The parsed JSON response
    """
    try:
        check_content_type_is_json(response.headers)

        # Use the custom JSON decoder (partially re-implements treq.json_content).
        # NOTE(review): the whole body is buffered into memory before parsing —
        # confirm an upstream size limit applies.
        d = treq.text_content(response, encoding="utf-8")
        d.addCallback(json_decoder.decode)

        # Enforce the read timeout on top of the body-reading Deferred.
        d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor)

        body = await make_deferred_yieldable(d)
    except defer.TimeoutError as e:
        logger.warning(
            "{%s} [%s] Timed out reading response - %s %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
        )
        # Timeouts are flagged as retryable for the caller's retry logic.
        raise RequestSendFailed(e, can_retry=True) from e
    except Exception as e:
        logger.warning(
            "{%s} [%s] Error reading response %s %s: %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
            e,
        )
        raise

    time_taken_secs = reactor.seconds() - start_ms / 1000

    logger.info(
        "{%s} [%s] Completed request: %d %s in %.2f secs - %s %s",
        request.txn_id,
        request.destination,
        response.code,
        response.phrase.decode("ascii", errors="replace"),
        time_taken_secs,
        request.method,
        request.uri.decode("ascii"),
    )
    return body
class MatrixFederationHttpClient:
"""HTTP client used to talk to other homeservers over the federation
protocol. Send client certificates and signs requests.
Attributes:
agent (twisted.web.client.Agent): The twisted Agent used to send the
requests.
"""
def __init__(self, hs, tls_client_options_factory):
    # hs is presumably the synapse HomeServer instance — confirm at call site.
    self.hs = hs
    self.signing_key = hs.signing_key
    self.server_name = hs.hostname

    # We need to use a DNS resolver which filters out blacklisted IP
    # addresses, to prevent DNS rebinding.
    self.reactor = BlacklistingReactorWrapper(
        hs.get_reactor(), None, hs.config.federation_ip_range_blacklist
    )

    user_agent = hs.version_string
    if hs.config.user_agent_suffix:
        user_agent = "%s %s" % (user_agent, hs.config.user_agent_suffix)
    user_agent = user_agent.encode("ascii")

    self.agent = MatrixFederationAgent(
        self.reactor,
        tls_client_options_factory,
        user_agent,
        hs.config.federation_ip_range_blacklist,
    )

    # Use a BlacklistingAgentWrapper to prevent circumventing the IP
    # blacklist via IP literals in server names
    self.agent = BlacklistingAgentWrapper(
        self.agent, ip_blacklist=hs.config.federation_ip_range_blacklist,
    )

    self.clock = hs.get_clock()
    self._store = hs.get_datastore()
    self.version_string_bytes = hs.version_string.encode("ascii")
    # Default per-attempt timeout, in seconds (see _send_request).
    self.default_timeout = 60

    def schedule(x):
        # Run cooperator work on the next reactor tick.
        self.reactor.callLater(_EPSILON, x)

    self._cooperator = Cooperator(scheduler=schedule)
async def _send_request_with_optional_trailing_slash(
    self,
    request: MatrixFederationRequest,
    try_trailing_slash_on_400: bool = False,
    **send_request_args
) -> IResponse:
    """Wrapper for _send_request which can optionally retry the request
    upon receiving a combination of a 400 HTTP response code and a
    'M_UNRECOGNIZED' errcode. This is a workaround for Synapse <= v0.99.3
    due to #3622.

    Args:
        request: details of request to be sent
        try_trailing_slash_on_400: Whether on receiving a 400
            'M_UNRECOGNIZED' from the server to retry the request with a
            trailing slash appended to the request path.
        send_request_args: A dictionary of arguments to pass to
            `_send_request()`.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).

    Returns:
        Parsed JSON response body.
    """
    try:
        response = await self._send_request(request, **send_request_args)
    except HttpResponseException as e:
        # Received an HTTP error > 300. Check if it meets the requirements
        # to retry with a trailing slash
        if not try_trailing_slash_on_400:
            raise

        if e.code != 400 or e.to_synapse_error().errcode != "M_UNRECOGNIZED":
            raise

        # Retry with a trailing slash if we received a 400 with
        # 'M_UNRECOGNIZED' which some endpoints can return when omitting a
        # trailing slash on Synapse <= v0.99.3.
        logger.info("Retrying request with trailing slash")

        # Request is frozen so we create a new instance
        request = attr.evolve(request, path=request.path + "/")

        response = await self._send_request(request, **send_request_args)

    return response
async def _send_request(
    self,
    request: MatrixFederationRequest,
    retry_on_dns_fail: bool = True,
    timeout: Optional[int] = None,
    long_retries: bool = False,
    ignore_backoff: bool = False,
    backoff_on_404: bool = False,
) -> IResponse:
    """
    Sends a request to the given server.

    Args:
        request: details of request to be sent
        retry_on_dns_fail: true if the request should be retried on DNS failures
        timeout: number of milliseconds to wait for the response headers
            (including connecting to the server), *for each attempt*.
            60s by default.
        long_retries: whether to use the long retry algorithm.
            The regular retry algorithm makes 4 attempts, with intervals
            [0.5s, 1s, 2s].
            The long retry algorithm makes 11 attempts, with intervals
            [4s, 16s, 60s, 60s, ...]
            Both algorithms add -20%/+40% jitter to the retry intervals.
            Note that the above intervals are *in addition* to the time spent
            waiting for the request to complete (up to `timeout` ms).
            NB: the long retry algorithm takes over 20 minutes to complete, with
            a default timeout of 60s!
        ignore_backoff: true to ignore the historical backoff data
            and try the request anyway.
        backoff_on_404: Back off if we get a 404

    Returns:
        Resolves with the HTTP response object on success.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).
        NotRetryingDestination: If we are not yet ready to retry this
            server.
        FederationDeniedError: If this destination is not on our
            federation whitelist
        RequestSendFailed: If there were problems connecting to the
            remote, due to e.g. DNS failures, connection timeouts etc.
    """
    # Per-attempt timeout in seconds; `timeout` arrives in milliseconds.
    if timeout:
        _sec_timeout = timeout / 1000
    else:
        _sec_timeout = self.default_timeout

    # Refuse outright if a federation whitelist is configured and this
    # destination is not on it.
    if (
        self.hs.config.federation_domain_whitelist is not None
        and request.destination not in self.hs.config.federation_domain_whitelist
    ):
        raise FederationDeniedError(request.destination)

    # Raises NotRetryingDestination if we are still backing off this server.
    limiter = await synapse.util.retryutils.get_retry_limiter(
        request.destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
    )

    method_bytes = request.method.encode("ascii")
    destination_bytes = request.destination.encode("ascii")
    path_bytes = request.path.encode("ascii")
    if request.query:
        query_bytes = encode_query_args(request.query)
    else:
        query_bytes = b""

    # Opentracing span for the whole request (including retries).
    scope = start_active_span(
        "outgoing-federation-request",
        tags={
            tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
            tags.PEER_ADDRESS: request.destination,
            tags.HTTP_METHOD: request.method,
            tags.HTTP_URL: request.path,
        },
        finish_on_close=True,
    )

    # Inject the span into the headers
    headers_dict = {}  # type: Dict[bytes, List[bytes]]
    inject_active_span_byte_dict(headers_dict, request.destination)

    headers_dict[b"User-Agent"] = [self.version_string_bytes]

    with limiter, scope:
        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        url_bytes = request.uri
        url_str = url_bytes.decode("ascii")

        # The bytes we sign cover only the path and query, not the scheme
        # or authority (per the federation signing spec).
        url_to_sign_bytes = urllib.parse.urlunparse(
            (b"", b"", path_bytes, None, query_bytes, b"")
        )

        while True:
            try:
                # The JSON body (if any) is covered by the request signature,
                # so the auth headers must be rebuilt on every attempt in case
                # json_callback produces fresh content.
                json = request.get_json()
                if json:
                    headers_dict[b"Content-Type"] = [b"application/json"]
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes, json
                    )
                    data = encode_canonical_json(json)
                    producer = QuieterFileBodyProducer(
                        BytesIO(data), cooperator=self._cooperator
                    )  # type: Optional[IBodyProducer]
                else:
                    producer = None
                    auth_headers = self.build_auth_headers(
                        destination_bytes, method_bytes, url_to_sign_bytes
                    )

                headers_dict[b"Authorization"] = auth_headers

                logger.debug(
                    "{%s} [%s] Sending request: %s %s; timeout %fs",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _sec_timeout,
                )

                outgoing_requests_counter.labels(request.method).inc()

                try:
                    with Measure(self.clock, "outbound_request"):
                        # we don't want all the fancy cookie and redirect handling
                        # that treq.request gives: just use the raw Agent.
                        request_deferred = self.agent.request(
                            method_bytes,
                            url_bytes,
                            headers=Headers(headers_dict),
                            bodyProducer=producer,
                        )

                        request_deferred = timeout_deferred(
                            request_deferred,
                            timeout=_sec_timeout,
                            reactor=self.reactor,
                        )

                        response = await request_deferred
                except DNSLookupError as e:
                    raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e
                except Exception as e:
                    # Any other connection-level failure is considered
                    # retryable (timeouts, refused connections, ...).
                    raise RequestSendFailed(e, can_retry=True) from e

                incoming_responses_counter.labels(
                    request.method, response.code
                ).inc()

                set_tag(tags.HTTP_STATUS_CODE, response.code)
                response_phrase = response.phrase.decode("ascii", errors="replace")

                if 200 <= response.code < 300:
                    logger.debug(
                        "{%s} [%s] Got response headers: %d %s",
                        request.txn_id,
                        request.destination,
                        response.code,
                        response_phrase,
                    )
                    pass
                else:
                    logger.info(
                        "{%s} [%s] Got response headers: %d %s",
                        request.txn_id,
                        request.destination,
                        response.code,
                        response_phrase,
                    )
                    # :'(
                    # Update transactions table?

                    # Read the error body (with its own timeout) so we can
                    # include it in the raised HttpResponseException.
                    d = treq.content(response)
                    d = timeout_deferred(
                        d, timeout=_sec_timeout, reactor=self.reactor
                    )

                    try:
                        body = await make_deferred_yieldable(d)
                    except Exception as e:
                        # Eh, we're already going to raise an exception so lets
                        # ignore if this fails.
                        logger.warning(
                            "{%s} [%s] Failed to get error response: %s %s: %s",
                            request.txn_id,
                            request.destination,
                            request.method,
                            url_str,
                            _flatten_response_never_received(e),
                        )
                        body = None

                    exc = HttpResponseException(
                        response.code, response_phrase, body
                    )

                    # Retry if the error is a 429 (Too Many Requests),
                    # otherwise just raise a standard HttpResponseException
                    if response.code == 429:
                        raise RequestSendFailed(exc, can_retry=True) from exc
                    else:
                        raise exc

                break
            except RequestSendFailed as e:
                logger.info(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e.inner_exception),
                )

                if not e.can_retry:
                    raise

                # NB: no retries when an explicit timeout was given — the
                # caller has asked for a bounded overall duration.
                if retries_left and not timeout:
                    if long_retries:
                        delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                        delay = min(delay, 60)
                        delay *= random.uniform(0.8, 1.4)
                    else:
                        delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                        delay = min(delay, 2)
                        delay *= random.uniform(0.8, 1.4)

                    logger.debug(
                        "{%s} [%s] Waiting %ss before re-sending...",
                        request.txn_id,
                        request.destination,
                        delay,
                    )

                    await self.clock.sleep(delay)
                    retries_left -= 1
                else:
                    raise

            except Exception as e:
                logger.warning(
                    "{%s} [%s] Request failed: %s %s: %s",
                    request.txn_id,
                    request.destination,
                    request.method,
                    url_str,
                    _flatten_response_never_received(e),
                )
                raise
    return response
def build_auth_headers(
    self,
    destination: Optional[bytes],
    method: bytes,
    url_bytes: bytes,
    content: Optional[JsonDict] = None,
    destination_is: Optional[bytes] = None,
) -> List[bytes]:
    """
    Builds the Authorization headers for a federation request

    Args:
        destination: The destination homeserver of the request.
            May be None if the destination is an identity server, in which case
            destination_is must be non-None.
        method: The HTTP method of the request
        url_bytes: The URI path of the request
        content: The body of the request
        destination_is: As 'destination', but if the destination is an
            identity server

    Returns:
        A list of headers to be added as "Authorization:" headers
    """
    # Assemble the JSON object that gets signed, then sign it with our
    # server's signing key(s).
    to_sign = {
        "method": method.decode("ascii"),
        "uri": url_bytes.decode("ascii"),
        "origin": self.server_name,
    }

    if destination is not None:
        to_sign["destination"] = destination.decode("ascii")

    if destination_is is not None:
        to_sign["destination_is"] = destination_is.decode("ascii")

    if content is not None:
        to_sign["content"] = content

    signed = sign_json(to_sign, self.server_name, self.signing_key)

    # One X-Matrix header per signature (normally a single signing key).
    return [
        (
            'X-Matrix origin=%s,key="%s",sig="%s"'
            % (self.server_name, key, sig)
        ).encode("ascii")
        for key, sig in signed["signatures"][self.server_name].items()
    ]
async def put_json(
    self,
    destination: str,
    path: str,
    args: Optional[QueryArgs] = None,
    data: Optional[JsonDict] = None,
    json_data_callback: Optional[Callable[[], JsonDict]] = None,
    long_retries: bool = False,
    timeout: Optional[int] = None,
    ignore_backoff: bool = False,
    backoff_on_404: bool = False,
    try_trailing_slash_on_400: bool = False,
) -> Union[JsonDict, list]:
    """Send a PUT with a JSON body and parse the JSON response.

    Args:
        destination: The remote server to send the HTTP request to.
        path: The HTTP path.
        args: query params
        data: A dict to encode as the JSON request body.
        json_data_callback: A callable returning the dict to use as the
            request body (takes precedence over `data` when regenerating
            the body on retries).
        long_retries: whether to use the long retry algorithm. See
            docs on _send_request for details.
        timeout: number of milliseconds to wait for the response.
            self._default_timeout (60s) by default.
            Note that we may make several attempts to send the request; this
            timeout applies to the time spent waiting for response headers for
            *each* attempt (including connection time) as well as the time spent
            reading the response body after a 200 response.
        ignore_backoff: true to ignore the historical backoff data
            and try the request anyway.
        backoff_on_404: True if a 404 response should count as a failure
            of the server (and should therefore back off future requests).
        try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
            response we should retry with a trailing slash appended to the
            path. Workaround for #3622 in Synapse <= v0.99.3; attempted
            before backing off if backing off has been enabled.

    Returns:
        Succeeds when we get a 2xx HTTP response. The result will be the
        decoded JSON body.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).
        NotRetryingDestination: If we are not yet ready to retry this
            server.
        FederationDeniedError: If this destination is not on our
            federation whitelist
        RequestSendFailed: If there were problems connecting to the
            remote, due to e.g. DNS failures, connection timeouts etc.
    """
    request = MatrixFederationRequest(
        method="PUT",
        destination=destination,
        path=path,
        query=args,
        json_callback=json_data_callback,
        json=data,
    )

    start_ms = self.clock.time_msec()

    response = await self._send_request_with_optional_trailing_slash(
        request,
        try_trailing_slash_on_400,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
        long_retries=long_retries,
        timeout=timeout,
    )

    # Body-read timeout mirrors the per-attempt header timeout.
    _sec_timeout = timeout / 1000 if timeout is not None else self.default_timeout

    return await _handle_json_response(
        self.reactor, _sec_timeout, request, response, start_ms
    )
async def post_json(
    self,
    destination: str,
    path: str,
    data: Optional[JsonDict] = None,
    long_retries: bool = False,
    timeout: Optional[int] = None,
    ignore_backoff: bool = False,
    args: Optional[QueryArgs] = None,
) -> Union[JsonDict, list]:
    """ Sends the specified json data using POST

    Args:
        destination: The remote server to send the HTTP request to.
        path: The HTTP path.
        data: A dict containing the data that will be used as
            the request body. This will be encoded as JSON.
        long_retries: whether to use the long retry algorithm. See
            docs on _send_request for details.
        timeout: number of milliseconds to wait for the response.
            self._default_timeout (60s) by default.
            Note that we may make several attempts to send the request; this
            timeout applies to the time spent waiting for response headers for
            *each* attempt (including connection time) as well as the time spent
            reading the response body after a 200 response.
        ignore_backoff: true to ignore the historical backoff data and
            try the request anyway.
        args: query params

    Returns:
        dict|list: Succeeds when we get a 2xx HTTP response. The
        result will be the decoded JSON body.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).
        NotRetryingDestination: If we are not yet ready to retry this
            server.
        FederationDeniedError: If this destination is not on our
            federation whitelist
        RequestSendFailed: If there were problems connecting to the
            remote, due to e.g. DNS failures, connection timeouts etc.
    """
    request = MatrixFederationRequest(
        method="POST", destination=destination, path=path, query=args, json=data
    )

    start_ms = self.clock.time_msec()

    response = await self._send_request(
        request,
        long_retries=long_retries,
        timeout=timeout,
        ignore_backoff=ignore_backoff,
    )

    # Check against None (not truthiness) so that an explicit timeout of 0
    # is honoured, consistently with put_json/get_json/delete_json.
    if timeout is not None:
        _sec_timeout = timeout / 1000
    else:
        _sec_timeout = self.default_timeout

    body = await _handle_json_response(
        self.reactor, _sec_timeout, request, response, start_ms
    )
    return body
async def get_json(
    self,
    destination: str,
    path: str,
    args: Optional[QueryArgs] = None,
    retry_on_dns_fail: bool = True,
    timeout: Optional[int] = None,
    ignore_backoff: bool = False,
    try_trailing_slash_on_400: bool = False,
) -> Union[JsonDict, list]:
    """GET a JSON body from the given homeserver and path.

    Args:
        destination: The remote server to send the HTTP request to.
        path: The HTTP path.
        args: A dictionary used to create query strings, defaults to None.
        retry_on_dns_fail: true if the request should be retried on DNS
            failures.
        timeout: number of milliseconds to wait for the response.
            self._default_timeout (60s) by default.
            Note that we may make several attempts to send the request; this
            timeout applies to the time spent waiting for response headers for
            *each* attempt (including connection time) as well as the time spent
            reading the response body after a 200 response.
        ignore_backoff: true to ignore the historical backoff data
            and try the request anyway.
        try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
            response we should retry with a trailing slash appended to the
            path. Workaround for #3622 in Synapse <= v0.99.3.

    Returns:
        Succeeds when we get a 2xx HTTP response. The result will be the
        decoded JSON body.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).
        NotRetryingDestination: If we are not yet ready to retry this
            server.
        FederationDeniedError: If this destination is not on our
            federation whitelist
        RequestSendFailed: If there were problems connecting to the
            remote, due to e.g. DNS failures, connection timeouts etc.
    """
    request = MatrixFederationRequest(
        method="GET", destination=destination, path=path, query=args
    )

    start_ms = self.clock.time_msec()

    response = await self._send_request_with_optional_trailing_slash(
        request,
        try_trailing_slash_on_400,
        backoff_on_404=False,
        ignore_backoff=ignore_backoff,
        retry_on_dns_fail=retry_on_dns_fail,
        timeout=timeout,
    )

    # Body-read timeout mirrors the per-attempt header timeout.
    _sec_timeout = timeout / 1000 if timeout is not None else self.default_timeout

    return await _handle_json_response(
        self.reactor, _sec_timeout, request, response, start_ms
    )
async def delete_json(
    self,
    destination: str,
    path: str,
    long_retries: bool = False,
    timeout: Optional[int] = None,
    ignore_backoff: bool = False,
    args: Optional[QueryArgs] = None,
) -> Union[JsonDict, list]:
    """Send a DELETE request to the remote expecting some json response

    Args:
        destination: The remote server to send the HTTP request to.
        path: The HTTP path.
        long_retries: whether to use the long retry algorithm. See
            docs on _send_request for details.
        timeout: number of milliseconds to wait for the response.
            self._default_timeout (60s) by default.
            Note that we may make several attempts to send the request; this
            timeout applies to the time spent waiting for response headers for
            *each* attempt (including connection time) as well as the time spent
            reading the response body after a 200 response.
        ignore_backoff: true to ignore the historical backoff data and
            try the request anyway.
        args: query params

    Returns:
        Succeeds when we get a 2xx HTTP response. The result will be the
        decoded JSON body.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).
        NotRetryingDestination: If we are not yet ready to retry this
            server.
        FederationDeniedError: If this destination is not on our
            federation whitelist
        RequestSendFailed: If there were problems connecting to the
            remote, due to e.g. DNS failures, connection timeouts etc.
    """
    request = MatrixFederationRequest(
        method="DELETE", destination=destination, path=path, query=args
    )

    start_ms = self.clock.time_msec()

    response = await self._send_request(
        request,
        long_retries=long_retries,
        timeout=timeout,
        ignore_backoff=ignore_backoff,
    )

    # Body-read timeout mirrors the per-attempt header timeout.
    _sec_timeout = timeout / 1000 if timeout is not None else self.default_timeout

    return await _handle_json_response(
        self.reactor, _sec_timeout, request, response, start_ms
    )
async def get_file(
    self,
    destination: str,
    path: str,
    output_stream,
    args: Optional[QueryArgs] = None,
    retry_on_dns_fail: bool = True,
    max_size: Optional[int] = None,
    ignore_backoff: bool = False,
) -> Tuple[int, Dict[bytes, List[bytes]]]:
    """GET a file from a given homeserver, streaming the body into
    output_stream.

    Args:
        destination: The remote server to send the HTTP request to.
        path: The HTTP path to GET.
        output_stream: File to write the response body to.
        args: Optional dictionary used to create the query string.
        retry_on_dns_fail: true if the request should be retried on DNS
            failures.
        max_size: maximum number of body bytes to accept, if any.
        ignore_backoff: true to ignore the historical backoff data
            and try the request anyway.

    Returns:
        Resolves with an (int,dict) tuple of the file length and a dict
        of the response headers.

    Raises:
        HttpResponseException: If we get an HTTP response code >= 300
            (except 429).
        NotRetryingDestination: If we are not yet ready to retry this
            server.
        FederationDeniedError: If this destination is not on our
            federation whitelist
        RequestSendFailed: If there were problems connecting to the
            remote, due to e.g. DNS failures, connection timeouts etc.
    """
    request = MatrixFederationRequest(
        method="GET", destination=destination, path=path, query=args
    )

    response = await self._send_request(
        request, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff
    )

    headers = dict(response.headers.getAllRawHeaders())

    try:
        # Stream the body straight to output_stream, bounded by max_size
        # and the default timeout.
        body_deferred = readBodyToFile(response, output_stream, max_size)
        body_deferred.addTimeout(self.default_timeout, self.reactor)
        length = await make_deferred_yieldable(body_deferred)
    except Exception as e:
        logger.warning(
            "{%s} [%s] Error reading response: %s",
            request.txn_id,
            request.destination,
            e,
        )
        raise

    logger.info(
        "{%s} [%s] Completed: %d %s [%d bytes] %s %s",
        request.txn_id,
        request.destination,
        response.code,
        response.phrase.decode("ascii", errors="replace"),
        length,
        request.method,
        request.uri.decode("ascii"),
    )
    return (length, headers)
def _flatten_response_never_received(e):
if hasattr(e, "reasons"):
reasons = ", ".join(
_flatten_response_never_received(f.value) for f in e.reasons
)
return "%s:[%s]" % (type(e).__name__, reasons)
else:
return repr(e)
def check_content_type_is_json(headers: Headers) -> None:
    """
    Check that a set of HTTP headers have a Content-Type header, and that it
    is application/json.

    Args:
        headers: headers to check

    Raises:
        RequestSendFailed: if the Content-Type header is missing or isn't JSON
    """
    raw_values = headers.getRawHeaders(b"Content-Type")
    if raw_values is None:
        raise RequestSendFailed(
            RuntimeError("No Content-Type header received from remote server"),
            can_retry=False,
        )

    # only the first header
    c_type = raw_values[0].decode("ascii")
    media_type, _options = cgi.parse_header(c_type)
    if media_type != "application/json":
        raise RequestSendFailed(
            RuntimeError(
                "Remote server sent Content-Type header of '%s', not 'application/json'"
                % c_type,
            ),
            can_retry=False,
        )
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_1916_3 |
crossvul-python_data_good_1890_1 | import base64
import re
import pyparsing as pp
from .error import *
# Matches a backslash escape pair inside a quoted-string so the escaped
# character can be recovered.
UNQUOTE_PAIRS = re.compile(r"\\(.)")
# pyparsing parse action: strip the surrounding double quotes and undo
# backslash escaping.
unquote = lambda s, l, t: UNQUOTE_PAIRS.sub(r"\1", t[0][1:-1])

# Grammar for HTTP authentication challenges, following:
# https://tools.ietf.org/html/rfc7235#section-1.2
# https://tools.ietf.org/html/rfc7235#appendix-B
tchar = "!#$%&'*+-.^_`|~" + pp.nums + pp.alphas
token = pp.Word(tchar).setName("token")
# token68: base64-ish blob (e.g. the credentials in "Authorization: Basic xxx=")
token68 = pp.Combine(pp.Word("-._~+/" + pp.nums + pp.alphas) + pp.ZeroOrMore("=")).setName("token68")

quoted_string = pp.dblQuotedString.copy().setName("quoted-string").setParseAction(unquote)

# auth-param names are case-insensitive; normalise them to lower case.
auth_param_name = token.copy().setName("auth-param-name").addParseAction(pp.downcaseTokens)
auth_param = auth_param_name + pp.Suppress("=") + (token ^ quoted_string)
params = pp.Dict(pp.delimitedList(pp.Group(auth_param)))

scheme = token("scheme")
# A challenge is a scheme followed by either a token68 blob or a list of
# auth-params.
challenge = scheme + (token68("token") ^ params("params"))

# Authentication-Info (RFC 7615) is just a comma-separated auth-param list.
authentication_info = params.copy()
# WWW-Authenticate may carry several comma-separated challenges.
www_authenticate = pp.delimitedList(pp.Group(challenge))
def _parse_authentication_info(headers, headername="authentication-info"):
    """Parse an Authentication-Info header (RFC 7615) into a dict.

    Args:
        headers: mapping of (lower-cased) header names to raw values.
        headername: name of the header to parse.

    Returns:
        dict mapping auth-param names (lower-cased) to their values;
        an empty dict when the header is absent or empty.

    Raises:
        MalformedHeader: if the header value cannot be parsed.
    """
    header = headers.get(headername, "").strip()
    if not header:
        return {}
    try:
        parsed = authentication_info.parseString(header)
    except pp.ParseException as ex:
        # Chain the pyparsing error so callers can inspect the original
        # parse failure via __cause__.
        raise MalformedHeader(headername) from ex

    return parsed.asDict()
def _parse_www_authenticate(headers, headername="www-authenticate"):
    """Parse a WWW-Authenticate header (RFC 7235) into per-scheme params.

    Args:
        headers: mapping of (lower-cased) header names to raw values.
        headername: name of the header to parse.

    Returns:
        A dictionary of dictionaries, one dict per auth_scheme
        (scheme names lower-cased). A token68-style challenge is
        represented as {"token": <value>}.

    Raises:
        MalformedHeader: if the header value cannot be parsed.
    """
    header = headers.get(headername, "").strip()
    if not header:
        return {}
    try:
        parsed = www_authenticate.parseString(header)
    except pp.ParseException as ex:
        # Chain the pyparsing error so callers can inspect the original
        # parse failure via __cause__.
        raise MalformedHeader(headername) from ex

    retval = {
        challenge["scheme"].lower(): challenge["params"].asDict()
        if "params" in challenge
        else {"token": challenge.get("token")}
        for challenge in parsed
    }
    return retval
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_1890_1 |
crossvul-python_data_bad_1090_0 | # Zulip's main markdown implementation. See docs/subsystems/markdown.md for
# detailed documentation on our markdown syntax.
from typing import (Any, Callable, Dict, Iterable, List, NamedTuple,
Optional, Set, Tuple, TypeVar, Union, cast)
from mypy_extensions import TypedDict
from typing.re import Match, Pattern
import markdown
import logging
import traceback
import urllib
import re
import os
import html
import time
import functools
import ujson
import xml.etree.cElementTree as etree
from xml.etree.cElementTree import Element
from collections import deque, defaultdict
import requests
from django.conf import settings
from django.db.models import Q
from markdown.extensions import codehilite, nl2br, tables
from zerver.lib.bugdown import fenced_code
from zerver.lib.bugdown.fenced_code import FENCE_RE
from zerver.lib.camo import get_camo_url
from zerver.lib.emoji import translate_emoticons, emoticon_regex
from zerver.lib.mention import possible_mentions, \
possible_user_group_mentions, extract_user_group
from zerver.lib.url_encoding import encode_stream
from zerver.lib.thumbnail import user_uploads_or_external
from zerver.lib.timeout import timeout, TimeoutExpired
from zerver.lib.cache import cache_with_key, NotFoundInCache
from zerver.lib.url_preview import preview as link_preview
from zerver.models import (
all_realm_filters,
get_active_streams,
MAX_MESSAGE_LENGTH,
Message,
Realm,
realm_filters_for_realm,
UserProfile,
UserGroup,
UserGroupMembership,
)
import zerver.lib.mention as mention
from zerver.lib.tex import render_tex
from zerver.lib.exceptions import BugdownRenderingException
ReturnT = TypeVar('ReturnT')

def one_time(method: Callable[[], ReturnT]) -> Callable[[], ReturnT]:
    '''
    Use this decorator with extreme caution.
    The function you wrap should have no dependency
    on any arguments (no args, no kwargs) nor should
    it depend on any global state.
    '''
    # Use a unique sentinel rather than None so that a wrapped function
    # which legitimately returns None is still invoked only once.
    unset = object()
    val = unset  # type: Any

    def cache_wrapper() -> ReturnT:
        nonlocal val
        if val is unset:
            val = method()
        return cast(ReturnT, val)

    return cache_wrapper
# Per-user info needed while rendering mentions.
FullNameInfo = TypedDict('FullNameInfo', {
    'id': int,
    'email': str,
    'full_name': str,
})

# Per-message rendering context (realm data, user lookups, ...) threaded
# through the markdown pipeline.
DbData = Dict[str, Any]

# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1

_T = TypeVar('_T')
# An ElementTree node, a string, or None — used by processors that may
# return either markup or plain text.
ElementStringNone = Union[Element, Optional[str]]

# Inline syntax markers for avatar and emoji substitution.
AVATAR_REGEX = r'!avatar\((?P<email>[^)]*)\)'
GRAVATAR_REGEX = r'!gravatar\((?P<email>[^)]*)\)'
EMOJI_REGEX = r'(?P<syntax>:[\w\-\+]+:)'
def verbose_compile(pattern: str) -> Any:
    """Anchor `pattern` between two lazy capture groups and compile it.

    The surrounding groups capture whatever precedes/follows the match on
    the (possibly multi-line, hence DOTALL) input. VERBOSE is set, so the
    supplied pattern may contain whitespace and comments.
    """
    anchored = "^(.*?)%s(.*?)$" % pattern
    return re.compile(anchored, re.DOTALL | re.UNICODE | re.VERBOSE)
def normal_compile(pattern: str) -> Any:
    """Like verbose_compile, but without VERBOSE and with a greedy
    trailing group (so group 2 captures everything after the first match).
    """
    anchored = r"^(.*?){}(.*)$".format(pattern)
    return re.compile(anchored, re.DOTALL | re.UNICODE)
# Matches "#**stream name**" style stream links (VERBOSE pattern, compiled
# via verbose_compile below).
STREAM_LINK_REGEX = r"""
    (?<![^\s'"\(,:<])            # Start after whitespace or specified chars
    \#\*\*                       # and after hash sign followed by double asterisks
        (?P<stream_name>[^\*]+)  # stream name can contain anything
    \*\*                         # ends by double asterisks
    """

@one_time
def get_compiled_stream_link_regex() -> Pattern:
    # Compiled lazily, and only once (@one_time), since regex compilation
    # is comparatively expensive.
    return verbose_compile(STREAM_LINK_REGEX)

# Cache for the web-link regex built by get_web_link_regex(); building it
# takes ~75ms, so it is computed on first use and memoized here.
LINK_REGEX = None  # type: Pattern
def get_web_link_regex() -> str:
    """Return (building and caching on first call) the compiled regex used
    to auto-link URLs, email addresses, and bitcoin addresses in messages.
    """
    # We create this one time, but not at startup.  So the
    # first message rendered in any process will have some
    # extra costs.  It's roughly 75ms to run this code, so
    # caching the value in LINK_REGEX is super important here.
    global LINK_REGEX
    if LINK_REGEX is not None:
        return LINK_REGEX

    tlds = '|'.join(list_of_tlds())

    # A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
    #
    # We detect a url either by the `https?://` or by building around the TLD.

    # In lieu of having a recursive regex (which python doesn't support) to match
    # arbitrary numbers of nested matching parenthesis, we manually build a regexp that
    # can match up to six
    # The inner_paren_contents chunk matches the innermore non-parenthesis-holding text,
    # and the paren_group matches text with, optionally, a matching set of parens
    inner_paren_contents = r"[^\s()\"]*"
    paren_group = r"""
                    [^\s()\"]*?            # Containing characters that won't end the URL
                    (?: \( %s \)           # and more characters in matched parens
                        [^\s()\"]*?        # followed by more characters
                    )*                     # zero-or-more sets of paired parens
                   """
    nested_paren_chunk = paren_group
    # Substitute the group into itself six times to allow up to six levels
    # of balanced parentheses inside a URL path.
    for i in range(6):
        nested_paren_chunk = nested_paren_chunk % (paren_group,)
    nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)

    # file:// links are opt-in via the server setting.
    file_links = r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r""
    REGEX = r"""
        (?<![^\s'"\(,:<])    # Start after whitespace or specified chars
                             # (Double-negative lookbehind to allow start-of-string)
        (?P<url>             # Main group
            (?:(?:           # Domain part
                https?://[\w.:@-]+?   # If it has a protocol, anything goes.
               |(?:                   # Or, if not, be more strict to avoid false-positives
                    (?:[\w-]+\.)+     # One or more domain components, separated by dots
                    (?:%s)            # TLDs (filled in via format from tlds-alpha-by-domain.txt)
                )
            )
            (?:/             # A path, beginning with /
                %s           # zero-to-6 sets of paired parens
            )?)              # Path is optional
            | (?:[\w.-]+\@[\w.-]+\.[\w]+)  # Email is separate, since it can't have a path
            %s               # File path start with file:///, enable by setting ENABLE_FILE_LINKS=True
            | (?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{25,34})  # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html
        )
        (?=                            # URL must be followed by (not included in group)
            [!:;\?\),\.\'\"\>]*         # Optional punctuation characters
            (?:\Z|\s)                  # followed by whitespace or end of string
        )
        """ % (tlds, nested_paren_chunk, file_links)
    LINK_REGEX = verbose_compile(REGEX)
    return LINK_REGEX
def clear_state_for_testing() -> None:
    """Reset the memoized link regex (test-only hook).

    The link regex never changes in production, but our tests
    try out both sides of ENABLE_FILE_LINKS, so we need
    a way to clear it so get_web_link_regex() rebuilds it.
    """
    global LINK_REGEX
    LINK_REGEX = None
bugdown_logger = logging.getLogger()
def rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:
    """ If the link points to a local destination we can just switch to that
    instead of opening a new tab. """

    if not db_data:
        return link

    realm_uri_prefix = db_data['realm_uri'] + "/"
    if not link.startswith(realm_uri_prefix):
        return link

    # Strip the realm prefix (including the `/` before the hash link),
    # leaving a relative in-app link.
    return link[len(realm_uri_prefix):]
def url_embed_preview_enabled(message: Optional[Message]=None,
                              realm: Optional[Realm]=None,
                              no_previews: Optional[bool]=False) -> bool:
    """Decide whether URL embed previews should be rendered, honouring the
    server-wide setting, the caller's opt-out, and the realm preference."""
    if not settings.INLINE_URL_EMBED_PREVIEW or no_previews:
        return False

    # Resolve the realm from the message when it wasn't given directly.
    if realm is None and message is not None:
        realm = message.get_realm()

    if realm is None:
        # realm can be None for odd use cases
        # like generating documentation or running
        # test code
        return True

    return realm.inline_url_embed_preview
def image_preview_enabled(message: Optional[Message]=None,
                          realm: Optional[Realm]=None,
                          no_previews: Optional[bool]=False) -> bool:
    """Decide whether inline image previews should be rendered, honouring the
    server-wide setting, the caller's opt-out, and the realm preference."""
    if not settings.INLINE_IMAGE_PREVIEW or no_previews:
        return False

    # Resolve the realm from the message when it wasn't given directly.
    if realm is None and message is not None:
        realm = message.get_realm()

    if realm is None:
        # realm can be None for odd use cases
        # like generating documentation or running
        # test code
        return True

    return realm.inline_image_preview
def list_of_tlds() -> List[str]:
    """Return known TLDs (longest first) for use in the link regex.

    Reads the IANA TLD list shipped next to this module; entries are
    lower-cased and comment lines are dropped.
    """
    # HACK we manually blacklist a few domains
    # NOTE(review): blacklist entries carry a trailing "\n" because the
    # comparison below happens before .strip() — intentional but fragile.
    blacklist = ['PY\n', "MD\n"]

    # tlds-alpha-by-domain.txt comes from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
    tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
    tlds = [tld.lower().strip() for tld in open(tlds_file, 'r')
            if tld not in blacklist and not tld[0].startswith('#')]
    # Longest-first so regex alternation prefers the most specific TLD.
    tlds.sort(key=len, reverse=True)
    return tlds
def walk_tree(root: Element,
              processor: Callable[[Element], Optional[_T]],
              stop_after_first: bool=False) -> List[_T]:
    """Breadth-first walk of `root`'s descendants (root itself is not
    processed), collecting the non-None results of `processor`.

    Args:
        root: tree whose descendants are fed to `processor`.
        processor: callback returning a value to collect, or None to skip.
        stop_after_first: if True, return as soon as one result is collected.

    Returns:
        list of collected results, in BFS order.
    """
    results = []
    queue = deque([root])

    while queue:
        currElement = queue.popleft()
        # Iterate the element directly instead of via getchildren(), which
        # was deprecated in Python 3.2 and removed in 3.9; len(child) > 0
        # likewise replaces the child.getchildren() truthiness test.
        for child in currElement:
            if len(child):
                queue.append(child)

            result = processor(child)
            if result is not None:
                results.append(result)
                if stop_after_first:
                    return results

    return results
# Ancestry context for a matched element: its parent and (possibly None)
# grandparent in the rendered tree.
ElementFamily = NamedTuple('ElementFamily', [
    ('grandparent', Optional[Element]),
    ('parent', Element),
    ('child', Element)
])

# A processor result bundled with the family of the element that produced it.
ResultWithFamily = NamedTuple('ResultWithFamily', [
    ('family', ElementFamily),
    ('result', Any)
])

# Queue entry used by walk_tree_with_family: an element plus a link to the
# ElementPair it was reached from (None for the root).
ElementPair = NamedTuple('ElementPair', [
    ('parent', Optional[Element]),
    ('value', Element)
])
def walk_tree_with_family(root: Element,
                          processor: Callable[[Element], Optional[_T]]
                          ) -> List[ResultWithFamily]:
    """Like walk_tree, but pairs each collected result with the
    child/parent/grandparent elements that produced it."""
    results = []

    queue = deque([ElementPair(parent=None, value=root)])
    while queue:
        currElementPair = queue.popleft()
        # Iterate the element directly instead of via getchildren(), which
        # was deprecated in Python 3.2 and removed in 3.9; len(child) > 0
        # likewise replaces the child.getchildren() truthiness test.
        for child in currElementPair.value:
            if len(child):
                queue.append(ElementPair(parent=currElementPair, value=child))  # type: ignore # Lack of Deque support in typing module for Python 3.4.3
            result = processor(child)
            if result is not None:
                if currElementPair.parent is not None:
                    grandparent_element = cast(ElementPair, currElementPair.parent)
                    grandparent = grandparent_element.value
                else:
                    grandparent = None
                family = ElementFamily(
                    grandparent=grandparent,
                    parent=currElementPair.value,
                    child=child
                )

                results.append(ResultWithFamily(
                    family=family,
                    result=result
                ))

    return results
# height is not actually used
def add_a(
        root: Element,
        url: str,
        link: str,
        title: Optional[str]=None,
        desc: Optional[str]=None,
        class_attr: str="message_inline_image",
        data_id: Optional[str]=None,
        insertion_index: Optional[int]=None,
        already_thumbnailed: Optional[bool]=False
) -> None:
    """Append (or insert at insertion_index) an inline image preview:
    a <div><a><img></a></div> fragment under `root`, pointing at `url`
    and linking to `link`. The img src is routed through /thumbnail for
    eligible uploads unless already_thumbnailed is set.
    """
    title = title if title is not None else url_filename(link)
    title = title if title else ""
    desc = desc if desc is not None else ""

    if insertion_index is not None:
        div = markdown.util.etree.Element("div")
        root.insert(insertion_index, div)
    else:
        div = markdown.util.etree.SubElement(root, "div")

    div.set("class", class_attr)
    a = markdown.util.etree.SubElement(div, "a")
    a.set("href", link)
    a.set("target", "_blank")
    a.set("title", title)
    if data_id is not None:
        a.set("data-id", data_id)
    img = markdown.util.etree.SubElement(a, "img")
    if settings.THUMBNAIL_IMAGES and (not already_thumbnailed) and user_uploads_or_external(url):
        # See docs/thumbnailing.md for some high-level documentation.
        #
        # We strip leading '/' from relative URLs here to ensure
        # consistency in what gets passed to /thumbnail
        url = url.lstrip('/')
        img.set("src", "/thumbnail?url={0}&size=thumbnail".format(
            urllib.parse.quote(url, safe='')
        ))
        # Full-size variant used by the lightbox on click.
        img.set('data-src-fullsize', "/thumbnail?url={0}&size=full".format(
            urllib.parse.quote(url, safe='')
        ))
    else:
        img.set("src", url)

    if class_attr == "message_inline_ref":
        # Reference-style previews additionally get a title/description
        # summary next to the image.
        summary_div = markdown.util.etree.SubElement(div, "div")
        title_div = markdown.util.etree.SubElement(summary_div, "div")
        title_div.set("class", "message_inline_image_title")
        title_div.text = title
        desc_div = markdown.util.etree.SubElement(summary_div, "desc")
        desc_div.set("class", "message_inline_image_desc")
def add_embed(root: Element, link: str, extracted_data: Dict[str, Any]) -> None:
    """Append a "message_embed" URL-preview card for `link`, built from the
    scraped metadata in `extracted_data` (image, title, description)."""
    container = markdown.util.etree.SubElement(root, "div")
    container.set("class", "message_embed")

    img_link = extracted_data.get('image')
    if img_link:
        parsed_img_link = urllib.parse.urlparse(img_link)
        if not parsed_img_link.netloc:
            # The scraped image URL is relative; resolve it against the
            # scheme and host of the page that referenced it.
            parsed_url = urllib.parse.urlparse(link)
            domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)
            img_link = urllib.parse.urljoin(domain, img_link)
        img = markdown.util.etree.SubElement(container, "a")
        for attr, value in [
            ("style", "background-image: url(" + img_link + ")"),
            ("href", link),
            ("target", "_blank"),
            ("class", "message_embed_image"),
        ]:
            img.set(attr, value)

    data_container = markdown.util.etree.SubElement(container, "div")
    data_container.set("class", "data-container")

    title = extracted_data.get('title')
    if title:
        title_elm = markdown.util.etree.SubElement(data_container, "div")
        title_elm.set("class", "message_embed_title")
        a = markdown.util.etree.SubElement(title_elm, "a")
        for attr, value in [("href", link), ("target", "_blank"), ("title", title)]:
            a.set(attr, value)
        a.text = title

    description = extracted_data.get('description')
    if description:
        description_elm = markdown.util.etree.SubElement(data_container, "div")
        description_elm.set("class", "message_embed_description")
        description_elm.text = description
@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
    """Fetch a tweet's data from the Twitter API, cached in the database.

    Returns the tweet as a dict, or None on permanent failures (missing
    credentials, deleted tweet, unexpected API error) — None results are
    cached by the decorator.  Transient failures (timeouts, rate limits)
    re-raise so the bad result is NOT cached and we can retry later.
    """
    if settings.TEST_SUITE:
        from . import testing_mocks
        res = testing_mocks.twitter(tweet_id)
    else:
        creds = {
            'consumer_key': settings.TWITTER_CONSUMER_KEY,
            'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
            'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
            'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
        }
        # Without a complete set of API credentials we cannot fetch anything.
        if not all(creds.values()):
            return None

        # We lazily import twitter here because its import process is
        # surprisingly slow, and doing so has a significant impact on
        # the startup performance of `manage.py` commands.
        import twitter

        try:
            api = twitter.Api(tweet_mode='extended', **creds)
            # Sometimes Twitter hangs on responses. Timing out here
            # will cause the Tweet to go through as-is with no inline
            # preview, rather than having the message be rejected
            # entirely. This timeout needs to be less than our overall
            # formatting timeout.
            tweet = timeout(3, api.GetStatus, tweet_id)
            res = tweet.AsDict()
        except AttributeError:
            bugdown_logger.error('Unable to load twitter api, you may have the wrong '
                                 'library installed, see https://github.com/zulip/zulip/issues/86')
            return None
        except TimeoutExpired:
            # We'd like to try again later and not cache the bad result,
            # so we need to re-raise the exception (just as though
            # we were being rate-limited)
            raise
        except twitter.TwitterError as e:
            t = e.args[0]
            if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
                # Code 34 means that the message doesn't exist; return
                # None so that we will cache the error
                return None
            elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or
                                                       t[0]['code'] == 130):
                # Code 88 means that we were rate-limited and 130
                # means Twitter is having capacity issues; either way
                # just raise the error so we don't cache None and will
                # try again later.
                raise
            else:
                # It's not clear what to do in cases of other errors,
                # but for now it seems reasonable to log at error
                # level (so that we get notified), but then cache the
                # failure to proceed with our usual work
                bugdown_logger.error(traceback.format_exc())
                return None
    return res
# Matchers for the opening/closing of the tags we care about when
# hand-scanning HTML below (the leading '<' has already been split off
# of each `part` before these are applied).
HEAD_START_RE = re.compile('^head[ >]')
HEAD_END_RE = re.compile('^/head[ >]')
META_START_RE = re.compile('^meta[ >]')
META_END_RE = re.compile('^/meta[ >]')

def fetch_open_graph_image(url: str) -> Optional[Dict[str, Any]]:
    """Fetch `url` and extract its Open Graph image/title/description.

    Returns {'image': ..., 'title': ..., 'desc': ...} (title/desc may be
    None), or None if the page cannot be fetched, cannot be parsed, or
    carries no og:image.  Only the <head>'s <meta> tags are kept, so a
    tiny synthetic document is parsed rather than the full page.
    """
    in_head = False
    # HTML will auto close meta tags, when we start the next tag add
    # a closing tag if it has not been closed yet.
    last_closed = True
    head = []
    # TODO: What if response content is huge? Should we get headers first?
    try:
        content = requests.get(url, timeout=1).text
    except Exception:
        return None
    # Extract the head and meta tags
    # All meta tags are self closing, have no children or are closed
    # automatically.
    for part in content.split('<'):
        if not in_head and HEAD_START_RE.match(part):
            # Started the head node output it to have a document root
            in_head = True
            head.append('<head>')
        elif in_head and HEAD_END_RE.match(part):
            # Found the end of the head close any remaining tag then stop
            # processing
            in_head = False
            if not last_closed:
                last_closed = True
                head.append('</meta>')
            head.append('</head>')
            break
        elif in_head and META_START_RE.match(part):
            # Found a meta node copy it
            if not last_closed:
                head.append('</meta>')
                last_closed = True
            head.append('<')
            head.append(part)
            if '/>' not in part:
                last_closed = False
        elif in_head and META_END_RE.match(part):
            # End of a meta node just copy it to close the tag
            head.append('<')
            head.append(part)
            last_closed = True
    try:
        doc = etree.fromstring(''.join(head))
    except etree.ParseError:
        return None
    og_image = doc.find('meta[@property="og:image"]')
    og_title = doc.find('meta[@property="og:title"]')
    og_desc = doc.find('meta[@property="og:description"]')
    title = None
    desc = None
    if og_image is not None:
        image = og_image.get('content')
    else:
        # Without an og:image there is nothing worth previewing.
        return None
    if og_title is not None:
        title = og_title.get('content')
    if og_desc is not None:
        desc = og_desc.get('content')
    return {'image': image, 'title': title, 'desc': desc}
def get_tweet_id(url: str) -> Optional[str]:
    """Return the tweet id if `url` points at a tweet on twitter.com, else None."""
    parsed_url = urllib.parse.urlparse(url)
    if not (parsed_url.netloc == 'twitter.com' or parsed_url.netloc.endswith('.twitter.com')):
        return None
    # In old-style twitter.com/#!/wdaher/status/1231241234-style URLs,
    # the interesting part lives in the fragment rather than the path.
    if parsed_url.path == '/' and len(parsed_url.fragment) > 5:
        to_match = parsed_url.fragment
    else:
        to_match = parsed_url.path
    match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,30})(/photo/[0-9])?/?$', to_match)
    if match:
        return match.group("tweetid")
    return None
class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):
    """Rewrite insecure (plain-http) inline image sources to go through
    the camo proxy, leaving https and site-relative sources alone."""
    def run(self, root: Element) -> None:
        # Visit every <img> element in the rendered tree.
        for image in walk_tree(root, lambda e: e if e.tag == "img" else None):
            source = image.get("src")
            if source.startswith("http://"):
                # https and relative sources (e.g. emoji) are not rewritten.
                image.set("src", get_camo_url(source))
class BacktickPattern(markdown.inlinepatterns.Pattern):
    """Return a `<code>` element wrapping the matched text verbatim."""
    def __init__(self, pattern: str) -> None:
        super().__init__(pattern)
        self.ESCAPED_BSLASH = '%s%s%s' % (markdown.util.STX, ord('\\'), markdown.util.ETX)
        self.tag = 'code'

    def handleMatch(self, m: Match[str]) -> Union[str, Element]:
        if not m.group(4):
            # No code content: just escape the backslashes.
            return m.group(2).replace('\\\\', self.ESCAPED_BSLASH)
        el = markdown.util.etree.Element(self.tag)
        # Unlike upstream markdown, we deliberately do NOT strip whitespace.
        el.text = markdown.util.AtomicString(m.group(4))
        return el
class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
    """Tree processor that adds inline previews for links found in a
    message: image previews, Dropbox images, tweets, YouTube/Vimeo
    thumbnails, and generic open-graph URL embeds."""

    # Largest preview image height we will pick from Twitter's size list.
    TWITTER_MAX_IMAGE_HEIGHT = 400
    # Maximum number of tweet previews rendered per message.
    TWITTER_MAX_TO_PREVIEW = 3
    # Messages with more links than this get no previews at all.
    INLINE_PREVIEW_LIMIT_PER_MESSAGE = 5

    def __init__(self, md: markdown.Markdown) -> None:
        markdown.treeprocessors.Treeprocessor.__init__(self, md)

    def get_actual_image_url(self, url: str) -> str:
        # Add specific per-site cases to convert image-preview urls to image urls.
        # See https://github.com/zulip/zulip/issues/4658 for more information
        parsed_url = urllib.parse.urlparse(url)
        if (parsed_url.netloc == 'github.com' or parsed_url.netloc.endswith('.github.com')):
            # https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png ->
            # https://raw.githubusercontent.com/zulip/zulip/master/static/images/logo/zulip-icon-128x128.png
            split_path = parsed_url.path.split('/')
            if len(split_path) > 3 and split_path[3] == "blob":
                return urllib.parse.urljoin('https://raw.githubusercontent.com',
                                            '/'.join(split_path[0:3] + split_path[4:]))
        return url

    def is_image(self, url: str) -> bool:
        """True if previews are enabled and the URL path looks like an image file."""
        if not self.markdown.image_preview_enabled:
            return False
        parsed_url = urllib.parse.urlparse(url)
        # List from http://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
        # (Note that "jpeg" has no leading dot, so ".jpeg" paths also match.)
        for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp"]:
            if parsed_url.path.lower().endswith(ext):
                return True
        return False

    def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]:
        """Return preview info for a Dropbox shared link, or None if `url`
        is not a previewable Dropbox link."""
        # TODO: The returned Dict could possibly be a TypedDict in future.
        parsed_url = urllib.parse.urlparse(url)
        if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')):
            is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/')
            # Only allow preview Dropbox shared links
            if not (parsed_url.path.startswith('/s/') or
                    parsed_url.path.startswith('/sh/') or
                    is_album):
                return None

            # Try to retrieve open graph protocol info for a preview
            # This might be redundant right now for shared links for images.
            # However, we might want to make use of title and description
            # in the future. If the actual image is too big, we might also
            # want to use the open graph image.
            image_info = fetch_open_graph_image(url)

            is_image = is_album or self.is_image(url)

            # If it is from an album or not an actual image file,
            # just use open graph image.
            if is_album or not is_image:
                # Failed to follow link to find an image preview so
                # use placeholder image and guess filename
                if image_info is None:
                    return None

                image_info["is_image"] = is_image
                return image_info

            # Otherwise, try to retrieve the actual image.
            # This is because open graph image from Dropbox may have padding
            # and gifs do not work.
            # TODO: What if image is huge? Should we get headers first?
            if image_info is None:
                image_info = dict()
            image_info['is_image'] = True
            parsed_url_list = list(parsed_url)
            parsed_url_list[4] = "dl=1"  # Replaces query
            image_info["image"] = urllib.parse.urlunparse(parsed_url_list)

            return image_info
        return None

    def youtube_id(self, url: str) -> Optional[str]:
        """Return the YouTube video id embedded in `url`, or None."""
        if not self.markdown.image_preview_enabled:
            return None
        # Youtube video id extraction regular expression from http://pastebin.com/KyKAFv1s
        # Slightly modified to support URLs of the form youtu.be/<id>
        # If it matches, match.group(2) is the video id.
        schema_re = r'(?:https?://)'
        host_re = r'(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)'
        param_re = r'(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))'
        id_re = r'([0-9A-Za-z_-]+)'
        youtube_re = r'^({schema_re}?{host_re}{param_re}?)?{id_re}(?(1).+)?$'
        youtube_re = youtube_re.format(schema_re=schema_re, host_re=host_re, id_re=id_re, param_re=param_re)
        match = re.match(youtube_re, url)
        if match is None:
            return None
        return match.group(2)

    def youtube_image(self, url: str) -> Optional[str]:
        """Return the thumbnail URL for a YouTube video link, or None."""
        yt_id = self.youtube_id(url)

        if yt_id is not None:
            return "https://i.ytimg.com/vi/%s/default.jpg" % (yt_id,)
        return None

    def vimeo_id(self, url: str) -> Optional[str]:
        """Return the Vimeo video id embedded in `url`, or None."""
        if not self.markdown.image_preview_enabled:
            return None
        #(http|https)?:\/\/(www\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?)
        # If it matches, match.group('id') is the video id.
        vimeo_re = r'^((http|https)?:\/\/(www\.)?vimeo.com\/' + \
                   r'(?:channels\/(?:\w+\/)?|groups\/' + \
                   r'([^\/]*)\/videos\/|)(\d+)(?:|\/\?))$'
        match = re.match(vimeo_re, url)
        if match is None:
            return None
        return match.group(5)

    def vimeo_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
        """Build a "Vimeo - <title>" label from scraped page data, or None."""
        title = extracted_data.get("title")
        if title is not None:
            return "Vimeo - {}".format(title)
        return None

    def twitter_text(self, text: str,
                     urls: List[Dict[str, str]],
                     user_mentions: List[Dict[str, Any]],
                     media: List[Dict[str, Any]]) -> Element:
        """
        Use data from the twitter API to turn links, mentions and media into A
        tags. Also convert unicode emojis to images.

        This works by using the urls, user_mentions and media data from
        the twitter API and searching for unicode emojis in the text using
        `unicode_emoji_regex`.

        The first step is finding the locations of the URLs, mentions, media and
        emoji in the text. For each match we build a dictionary with type, the start
        location, end location, the URL to link to, and the text(codepoint and title
        in case of emojis) to be used in the link(image in case of emojis).

        Next we sort the matches by start location. And for each we add the
        text from the end of the last link to the start of the current link to
        the output. The text needs to added to the text attribute of the first
        node (the P tag) or the tail the last link created.

        Finally we add any remaining text to the last node.
        """
        to_process = [] # type: List[Dict[str, Any]]
        # Build dicts for URLs
        for url_data in urls:
            short_url = url_data["url"]
            full_url = url_data["expanded_url"]
            for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
                to_process.append({
                    'type': 'url',
                    'start': match.start(),
                    'end': match.end(),
                    'url': short_url,
                    'text': full_url,
                })
        # Build dicts for mentions
        for user_mention in user_mentions:
            screen_name = user_mention['screen_name']
            mention_string = '@' + screen_name
            for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
                to_process.append({
                    'type': 'mention',
                    'start': match.start(),
                    'end': match.end(),
                    'url': 'https://twitter.com/' + urllib.parse.quote(screen_name),
                    'text': mention_string,
                })
        # Build dicts for media
        for media_item in media:
            short_url = media_item['url']
            expanded_url = media_item['expanded_url']
            for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
                to_process.append({
                    'type': 'media',
                    'start': match.start(),
                    'end': match.end(),
                    'url': short_url,
                    'text': expanded_url,
                })
        # Build dicts for emojis
        for match in re.finditer(unicode_emoji_regex, text, re.IGNORECASE):
            orig_syntax = match.group('syntax')
            codepoint = unicode_emoji_to_codepoint(orig_syntax)
            if codepoint in codepoint_to_name:
                display_string = ':' + codepoint_to_name[codepoint] + ':'
                to_process.append({
                    'type': 'emoji',
                    'start': match.start(),
                    'end': match.end(),
                    'codepoint': codepoint,
                    'title': display_string,
                })

        to_process.sort(key=lambda x: x['start'])
        p = current_node = markdown.util.etree.Element('p')

        def set_text(text: str) -> None:
            """
            Helper to set the text or the tail of the current_node
            """
            if current_node == p:
                current_node.text = text
            else:
                current_node.tail = text

        db_data = self.markdown.zulip_db_data
        current_index = 0
        for item in to_process:
            # The text we want to link starts in already linked text skip it
            if item['start'] < current_index:
                continue
            # Add text from the end of last link to the start of the current
            # link
            set_text(text[current_index:item['start']])
            current_index = item['end']
            if item['type'] != 'emoji':
                current_node = elem = url_to_a(db_data, item['url'], item['text'])
            else:
                current_node = elem = make_emoji(item['codepoint'], item['title'])
            p.append(elem)

        # Add any unused text
        set_text(text[current_index:])
        return p

    def twitter_link(self, url: str) -> Optional[Element]:
        """Render a tweet-preview <div> for `url`, or None if it is not a
        tweet link (or the tweet could not be fetched)."""
        tweet_id = get_tweet_id(url)

        if tweet_id is None:
            return None

        try:
            res = fetch_tweet_data(tweet_id)
            if res is None:
                return None
            user = res['user'] # type: Dict[str, Any]
            tweet = markdown.util.etree.Element("div")
            tweet.set("class", "twitter-tweet")
            img_a = markdown.util.etree.SubElement(tweet, 'a')
            img_a.set("href", url)
            img_a.set("target", "_blank")
            profile_img = markdown.util.etree.SubElement(img_a, 'img')
            profile_img.set('class', 'twitter-avatar')
            # For some reason, for, e.g. tweet 285072525413724161,
            # python-twitter does not give us a
            # profile_image_url_https, but instead puts that URL in
            # profile_image_url. So use _https if available, but fall
            # back gracefully.
            image_url = user.get('profile_image_url_https', user['profile_image_url'])
            profile_img.set('src', image_url)

            text = html.unescape(res['full_text'])
            urls = res.get('urls', [])
            user_mentions = res.get('user_mentions', [])
            media = res.get('media', []) # type: List[Dict[str, Any]]
            p = self.twitter_text(text, urls, user_mentions, media)
            tweet.append(p)

            span = markdown.util.etree.SubElement(tweet, 'span')
            span.text = "- %s (@%s)" % (user['name'], user['screen_name'])

            # Add image previews
            for media_item in media:
                # Only photos have a preview image
                if media_item['type'] != 'photo':
                    continue

                # Find the image size that is smaller than
                # TWITTER_MAX_IMAGE_HEIGHT px tall or the smallest
                size_name_tuples = list(media_item['sizes'].items())
                size_name_tuples.sort(reverse=True,
                                      key=lambda x: x[1]['h'])
                for size_name, size in size_name_tuples:
                    if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT:
                        break

                media_url = '%s:%s' % (media_item['media_url_https'], size_name)
                img_div = markdown.util.etree.SubElement(tweet, 'div')
                img_div.set('class', 'twitter-image')
                img_a = markdown.util.etree.SubElement(img_div, 'a')
                img_a.set('href', media_item['url'])
                img_a.set('target', '_blank')
                img_a.set('title', media_item['url'])
                img = markdown.util.etree.SubElement(img_a, 'img')
                img.set('src', media_url)

            return tweet
        except Exception:
            # We put this in its own try-except because it requires external
            # connectivity. If Twitter flakes out, we don't want to not-render
            # the entire message; we just want to not show the Twitter preview.
            bugdown_logger.warning(traceback.format_exc())
            return None

    def get_url_data(self, e: Element) -> Optional[Tuple[str, str]]:
        """For an <a> element, return its (href, link text) pair; else None."""
        if e.tag == "a":
            if e.text is not None:
                return (e.get("href"), e.text)
            return (e.get("href"), e.get("href"))
        return None

    def handle_image_inlining(self, root: Element, found_url: ResultWithFamily) -> None:
        """Insert an inline image preview near the <a> that references it,
        removing the original link when it carried no extra information."""
        grandparent = found_url.family.grandparent
        parent = found_url.family.parent
        ahref_element = found_url.family.child
        (url, text) = found_url.result
        actual_url = self.get_actual_image_url(url)

        # url != text usually implies a named link, which we opt not to remove
        url_eq_text = (url == text)

        if parent.tag == 'li':
            add_a(parent, self.get_actual_image_url(url), url, title=text)
            if not parent.text and not ahref_element.tail and url_eq_text:
                parent.remove(ahref_element)

        elif parent.tag == 'p':
            parent_index = None
            for index, uncle in enumerate(grandparent.getchildren()):
                if uncle is parent:
                    parent_index = index
                    break

            if parent_index is not None:
                ins_index = self.find_proper_insertion_index(grandparent, parent, parent_index)
                add_a(grandparent, actual_url, url, title=text, insertion_index=ins_index)

            else:
                # We're not inserting after parent, since parent not found.
                # Append to end of list of grandparent's children as normal
                add_a(grandparent, actual_url, url, title=text)

            # If link is alone in a paragraph, delete paragraph containing it
            if (len(parent.getchildren()) == 1 and
                    (not parent.text or parent.text == "\n") and
                    not ahref_element.tail and
                    url_eq_text):
                grandparent.remove(parent)

        else:
            # If none of the above criteria match, fall back to old behavior
            add_a(root, actual_url, url, title=text)

    def find_proper_insertion_index(self, grandparent: Element, parent: Element,
                                    parent_index_in_grandparent: int) -> int:
        # If there are several inline images from same paragraph, ensure that
        # they are in correct (and not opposite) order by inserting after last
        # inline image from paragraph 'parent'
        uncles = grandparent.getchildren()
        parent_links = [ele.attrib['href'] for ele in parent.iter(tag="a")]
        insertion_index = parent_index_in_grandparent

        while True:
            insertion_index += 1
            if insertion_index >= len(uncles):
                return insertion_index

            uncle = uncles[insertion_index]
            inline_image_classes = ['message_inline_image', 'message_inline_ref']
            if (
                uncle.tag != 'div' or
                'class' not in uncle.keys() or
                uncle.attrib['class'] not in inline_image_classes
            ):
                return insertion_index

            uncle_link = list(uncle.iter(tag="a"))[0].attrib['href']
            if uncle_link not in parent_links:
                return insertion_index

    def is_absolute_url(self, url: str) -> bool:
        """True if `url` has a network location (host) component."""
        return bool(urllib.parse.urlparse(url).netloc)

    def run(self, root: Element) -> None:
        """Scan the rendered message for links and append previews, in
        priority order: images, Dropbox, tweets, YouTube, then URL embeds."""
        # Get all URLs from the blob
        found_urls = walk_tree_with_family(root, self.get_url_data)

        if len(found_urls) == 0 or len(found_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE:
            return

        rendered_tweet_count = 0

        for found_url in found_urls:
            (url, text) = found_url.result
            if not self.is_absolute_url(url):
                if self.is_image(url):
                    self.handle_image_inlining(root, found_url)
                # We don't have a strong use case for doing url preview for relative links.
                continue

            dropbox_image = self.dropbox_image(url)
            if dropbox_image is not None:
                class_attr = "message_inline_ref"
                is_image = dropbox_image["is_image"]
                if is_image:
                    class_attr = "message_inline_image"
                    # Not making use of title and description of images
                add_a(root, dropbox_image['image'], url,
                      title=dropbox_image.get('title', ""),
                      desc=dropbox_image.get('desc', ""),
                      class_attr=class_attr,
                      already_thumbnailed=True)
                continue
            if self.is_image(url):
                self.handle_image_inlining(root, found_url)
                continue
            if get_tweet_id(url) is not None:
                if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:
                    # Render at most TWITTER_MAX_TO_PREVIEW tweet previews
                    # per message.
                    continue
                twitter_data = self.twitter_link(url)
                if twitter_data is None:
                    # This link is not actually a tweet known to twitter
                    continue
                rendered_tweet_count += 1
                div = markdown.util.etree.SubElement(root, "div")
                div.set("class", "inline-preview-twitter")
                div.insert(0, twitter_data)
                continue
            youtube = self.youtube_image(url)
            if youtube is not None:
                yt_id = self.youtube_id(url)
                add_a(root, youtube, url, None, None,
                      "youtube-video message_inline_image",
                      yt_id, already_thumbnailed=True)
                continue

            db_data = self.markdown.zulip_db_data
            if db_data and db_data['sent_by_bot']:
                continue

            if not self.markdown.url_embed_preview_enabled:
                continue

            try:
                extracted_data = link_preview.link_embed_data_from_cache(url)
            except NotFoundInCache:
                # Not cached yet; record the URL so a background job can
                # fetch its embed data for a later rerender.
                self.markdown.zulip_message.links_for_preview.add(url)
                continue

            if extracted_data:
                vm_id = self.vimeo_id(url)
                if vm_id is not None:
                    vimeo_image = extracted_data.get('image')
                    vimeo_title = self.vimeo_title(extracted_data)
                    if vimeo_image is not None:
                        add_a(root, vimeo_image, url, vimeo_title,
                              None, "vimeo-video message_inline_image", vm_id,
                              already_thumbnailed=True)
                    if vimeo_title is not None:
                        found_url.family.child.text = vimeo_title
                else:
                    add_embed(root, url, extracted_data)
class Avatar(markdown.inlinepatterns.Pattern):
    """Replace avatar/gravatar syntax with an <img> served from /avatar/."""
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        email = match.group('email').strip().lower()

        # Prefer addressing the avatar by user id when the email belongs
        # to a known user (looked up in the prefetched db_data).
        profile_id = None
        db_data = self.markdown.zulip_db_data
        if db_data is not None:
            user_dict = db_data['email_info'].get(email)
            if user_dict is not None:
                profile_id = user_dict['id']

        img = markdown.util.etree.Element('img')
        img.set('class', 'message_body_gravatar')
        img.set('src', '/avatar/{0}?s=30'.format(profile_id or email))
        img.set('title', email)
        img.set('alt', email)
        return img
def possible_avatar_emails(content: str) -> Set[str]:
    """Collect every email address appearing in avatar/gravatar syntax."""
    return {
        email
        for regex in [AVATAR_REGEX, GRAVATAR_REGEX]
        for email in re.findall(regex, content)
        if email
    }
# Load the build-time generated emoji tables: one mapping emoji names to
# unicode codepoint strings, and the inverse mapping codepoints to names.
path_to_name_to_codepoint = os.path.join(settings.STATIC_ROOT,
                                         "generated", "emoji", "name_to_codepoint.json")
with open(path_to_name_to_codepoint) as name_to_codepoint_file:
    name_to_codepoint = ujson.load(name_to_codepoint_file)

path_to_codepoint_to_name = os.path.join(settings.STATIC_ROOT,
                                         "generated", "emoji", "codepoint_to_name.json")
with open(path_to_codepoint_to_name) as codepoint_to_name_file:
    codepoint_to_name = ujson.load(codepoint_to_name_file)
# All of our emojis(non ZWJ sequences) belong to one of these unicode blocks:
# \U0001f100-\U0001f1ff - Enclosed Alphanumeric Supplement
# \U0001f200-\U0001f2ff - Enclosed Ideographic Supplement
# \U0001f300-\U0001f5ff - Miscellaneous Symbols and Pictographs
# \U0001f600-\U0001f64f - Emoticons (Emoji)
# \U0001f680-\U0001f6ff - Transport and Map Symbols
# \U0001f900-\U0001f9ff - Supplemental Symbols and Pictographs
# \u2000-\u206f - General Punctuation
# \u2300-\u23ff - Miscellaneous Technical
# \u2400-\u243f - Control Pictures
# \u2440-\u245f - Optical Character Recognition
# \u2460-\u24ff - Enclosed Alphanumerics
# \u2500-\u257f - Box Drawing
# \u2580-\u259f - Block Elements
# \u25a0-\u25ff - Geometric Shapes
# \u2600-\u26ff - Miscellaneous Symbols
# \u2700-\u27bf - Dingbats
# \u2900-\u297f - Supplemental Arrows-B
# \u2b00-\u2bff - Miscellaneous Symbols and Arrows
# \u3000-\u303f - CJK Symbols and Punctuation
# \u3200-\u32ff - Enclosed CJK Letters and Months
# Character class covering the unicode blocks listed above; captures a
# single emoji character as the named group 'syntax'.
unicode_emoji_regex = '(?P<syntax>['\
    '\U0001F100-\U0001F64F' \
    '\U0001F680-\U0001F6FF' \
    '\U0001F900-\U0001F9FF' \
    '\u2000-\u206F' \
    '\u2300-\u27BF' \
    '\u2900-\u297F' \
    '\u2B00-\u2BFF' \
    '\u3000-\u303F' \
    '\u3200-\u32FF' \
    '])'
# The equivalent JS regex is \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
# \ud83e[\udd00-\uddff]|[\u2000-\u206f]|[\u2300-\u27bf]|[\u2b00-\u2bff]|[\u3000-\u303f]|
# [\u3200-\u32ff]. See below comments for explanation. The JS regex is used by marked.js for
# frontend unicode emoji processing.
# The JS regex \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f] represents U0001f100-\U0001f64f
# The JS regex \ud83d[\ude80-\udeff] represents \U0001f680-\U0001f6ff
# The JS regex \ud83e[\udd00-\uddff] represents \U0001f900-\U0001f9ff
# The JS regex [\u2000-\u206f] represents \u2000-\u206f
# The JS regex [\u2300-\u27bf] represents \u2300-\u27bf
# Similarly other JS regexes can be mapped to the respective unicode blocks.
# For more information, please refer to the following article:
# http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript
def make_emoji(codepoint: str, display_string: str) -> Element:
    """Build the <span> used to render a unicode emoji."""
    # The title drops the surrounding colons and uses spaces instead of
    # underscores, e.g. ":thumbs_up:" -> "thumbs up".
    title = display_string[1:-1].replace("_", " ")

    node = markdown.util.etree.Element('span')
    for attr, value in [
        ('class', 'emoji emoji-%s' % (codepoint,)),
        ('title', title),
        ('role', 'img'),
        ('aria-label', title),
    ]:
        node.set(attr, value)
    node.text = display_string
    return node
def make_realm_emoji(src: str, display_string: str) -> Element:
    """Build the <img> used to render a realm (custom) emoji."""
    image = markdown.util.etree.Element('img')
    for attr, value in [
        ('src', src),
        ('class', 'emoji'),
        ("alt", display_string),
        # Title drops the colons and underscores, like make_emoji above.
        ("title", display_string[1:-1].replace("_", " ")),
    ]:
        image.set(attr, value)
    return image
def unicode_emoji_to_codepoint(unicode_emoji: str) -> str:
    """Return the lowercase hex codepoint string of a one-character emoji.

    Codepoints are zero-padded to a minimum width of four hex digits
    (e.g. '\\u00a9' -> '00a9') so they match the keys of the generated
    codepoint_to_name / name_to_codepoint tables.
    """
    # hex() yields e.g. '0x1f600'; strip the '0x' prefix and pad.
    # (The original comment claimed padding "if the length is less than
    # zero", which was wrong — the minimum width is four.)
    return hex(ord(unicode_emoji))[2:].zfill(4)
class EmoticonTranslation(markdown.inlinepatterns.Pattern):
    """ Translates emoticons like `:)` into emoji like `:smile:`. """
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        # Emoticon translation is a per-user setting; bail out unless the
        # prefetched db_data says it is enabled.
        db_data = self.markdown.zulip_db_data
        if db_data is None or not db_data['translate_emoticons']:
            return None

        translated = translate_emoticons(match.group('emoticon'))
        name = translated[1:-1]
        return make_emoji(name_to_codepoint[name], translated)
class UnicodeEmoji(markdown.inlinepatterns.Pattern):
    """Render a literal unicode emoji character as a styled emoji span."""
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        codepoint = unicode_emoji_to_codepoint(match.group('syntax'))
        if codepoint not in codepoint_to_name:
            # Not an emoji we know a name for; leave the text untouched.
            return None
        display_string = ':' + codepoint_to_name[codepoint] + ':'
        return make_emoji(codepoint, display_string)
class Emoji(markdown.inlinepatterns.Pattern):
    """Render :emoji_name: syntax, preferring realm emoji over unicode."""
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        orig_syntax = match.group("syntax")
        name = orig_syntax[1:-1]

        active_realm_emoji = {} # type: Dict[str, Dict[str, str]]
        db_data = self.markdown.zulip_db_data
        if db_data is not None:
            active_realm_emoji = db_data['active_realm_emoji']

        # Realm (custom) emoji shadow unicode emoji of the same name.
        if self.markdown.zulip_message and name in active_realm_emoji:
            return make_realm_emoji(active_realm_emoji[name]['source_url'], orig_syntax)
        if name == 'zulip':
            return make_realm_emoji('/static/generated/emoji/images/emoji/unicode/zulip.png', orig_syntax)
        if name in name_to_codepoint:
            return make_emoji(name_to_codepoint[name], orig_syntax)
        return None
def content_has_emoji_syntax(content: str) -> bool:
    """True if the message contains anything that looks like emoji syntax."""
    return bool(re.search(EMOJI_REGEX, content))
class ModalLink(markdown.inlinepatterns.Pattern):
    """
    A pattern that allows including in-app modal links in messages.
    """
    def handleMatch(self, match: Match[str]) -> Element:
        relative_url = match.group('relative_url')

        a_tag = markdown.util.etree.Element("a")
        a_tag.set("href", relative_url)
        a_tag.set("title", relative_url)
        a_tag.text = match.group('text')
        return a_tag
class Tex(markdown.inlinepatterns.Pattern):
    """Render inline TeX math, falling back to a flagged error span."""
    def handleMatch(self, match: Match[str]) -> Element:
        body = match.group('body')
        rendered = render_tex(body, is_inline=True)
        if rendered is None:
            # Rendering failed; show the raw source marked for error CSS.
            span = markdown.util.etree.Element('span')
            span.set('class', 'tex-error')
            span.text = '$$' + body + '$$'
            return span
        return etree.fromstring(rendered.encode('utf-8'))
upload_title_re = re.compile("^(https?://[^/]*)?(/user_uploads/\\d+)(/[^/]*)?/[^/]*/(?P<filename>[^/]*)$")

def url_filename(url: str) -> str:
    """Extract the filename if a URL is an uploaded file, or return the original URL"""
    match = upload_title_re.match(url)
    return match.group('filename') if match else url
def fixup_link(link: markdown.util.etree.Element, target_blank: bool=True) -> None:
    """Set certain attributes we want on every link."""
    # External links open in a new tab; in-app links (#narrow, mailto:)
    # pass target_blank=False and stay in the current tab.
    if target_blank:
        link.set('target', '_blank')
    # Uploaded-file links get their filename as a hover title.
    link.set('title', url_filename(link.get('href')))
def sanitize_url(url: str) -> Optional[str]:
    """
    Sanitize a url against xss attacks.
    See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.
    """
    try:
        parsed = urllib.parse.urlparse(url.replace(' ', '%20'))
    except ValueError:
        # So malformed it could not even be parsed; drop the href text.
        return ''
    scheme = parsed.scheme
    netloc = parsed.netloc
    path = parsed.path
    params = parsed.params
    query = parsed.query
    fragment = parsed.fragment

    if scheme == '' and netloc == '':
        if '@' in path:
            # No scheme or host, but an '@' in the path: treat it as an
            # email address and linkify it as mailto.
            scheme = 'mailto'
        elif path.startswith('/'):
            # Allow domain-relative links.
            return urllib.parse.urlunparse(('', '', path, params, query, fragment))
        elif path == '' and params == '' and query == '' and len(fragment) > 0:
            # Allow fragment-only links.
            return urllib.parse.urlunparse(('', '', '', '', '', fragment))

    # Zulip modification: If scheme is not specified, assume http://
    # We re-enter sanitize_url because netloc etc. need to be re-parsed.
    if not scheme:
        return sanitize_url('http://' + url)

    locless_schemes = ['mailto', 'news', 'file', 'bitcoin']
    if netloc == '' and scheme not in locless_schemes:
        # A host-less URL in a scheme that requires a host is never OK.
        return None

    # Whitelist the scheme: urllib will happily parse javascript://foo as
    # having a netloc, and plenty of other schemes do weird things like
    # launching external programs.
    if scheme not in ('http', 'https', 'ftp', 'mailto', 'file', 'bitcoin'):
        return None

    # Upstream markdown additionally scans path/params/query for ':' to
    # catch scheme-less javascript aliases; since we forced a scheme above,
    # that check is unnecessary here and would forbid legitimate URLs.
    # Url passes all tests. Return url as-is.
    return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
def url_to_a(db_data: Optional[DbData], url: str, text: Optional[str]=None) -> Union[Element, str]:
    """Build an <a> element for `url`, or return the raw text if the URL
    fails sanitization."""
    href = sanitize_url(url)
    if href is None:
        # Rejected by sanitize_url; render it as plain text.
        return url
    if text is None:
        text = markdown.util.AtomicString(url)

    href = rewrite_local_links_to_relative(db_data, href)
    # In-app narrows and mailto: links should open in the same tab.
    in_app = href.startswith("#narrow") or href.startswith('mailto:')

    a = markdown.util.etree.Element('a')
    a.set('href', href)
    a.text = text
    fixup_link(a, not in_app)
    return a
class CompiledPattern(markdown.inlinepatterns.Pattern):
    """An inline pattern constructed from an already-compiled regex.

    The markdown Pattern base class compiles its pattern string itself;
    we bypass that step so callers can supply a precompiled regex.
    """
    def __init__(self, compiled_re: Pattern, md: markdown.Markdown) -> None:
        self.md = md
        self.compiled_re = compiled_re
class AutoLink(CompiledPattern):
    """Convert bare URLs matched by the web-link regex into <a> elements."""
    def handleMatch(self, match: Match[str]) -> ElementStringNone:
        url = match.group('url')
        db_data = self.markdown.zulip_db_data
        return url_to_a(db_data, url)
class UListProcessor(markdown.blockprocessors.UListProcessor):
    """ Process unordered list blocks.

    Based on markdown.blockprocessors.UListProcessor, but does not accept
    '+' or '-' as a bullet character."""

    TAG = 'ul'
    # Only '*' starts a bullet; '+' and '-' are too common in chat text to
    # safely treat as list markers.
    RE = re.compile('^[ ]{0,3}[*][ ]+(.*)')

    def __init__(self, parser: Any) -> None:
        # HACK: Set the tab length to 2 just for the initialization of
        # this class, so that bulleted lists (and only bulleted lists)
        # work off 2-space indentation.
        parser.markdown.tab_length = 2
        super().__init__(parser)
        parser.markdown.tab_length = 4
class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
    """ Process unordered list blocks.

    Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent
    """

    def __init__(self, parser: Any) -> None:
        # HACK: Set the tab length to 2 just for the initialization of
        # this class, so that bulleted lists (and only bulleted lists)
        # work off 2-space indentation.
        parser.markdown.tab_length = 2
        super().__init__(parser)
        parser.markdown.tab_length = 4
class BlockQuoteProcessor(markdown.blockprocessors.BlockQuoteProcessor):
    """ Process BlockQuotes.

    Based on markdown.blockprocessors.BlockQuoteProcessor, but with 2-space indent
    """
    # Original regex for blockquote is RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
    # NOTE(review): the added negative lookahead appears to reject matches
    # where every remaining line is a bare '>' marker (i.e. an empty
    # quote) -- confirm against the test suite before touching it.
    RE = re.compile(r'(^|\n)(?!(?:[ ]{0,3}>\s*(?:$|\n))*(?:$|\n))'
                    r'[ ]{0,3}>[ ]?(.*)')
    mention_re = re.compile(mention.find_mentions)

    def clean(self, line: str) -> str:
        # Silence all the mentions inside blockquotes
        line = re.sub(self.mention_re, lambda m: "@_{}".format(m.group('match')), line)

        # And then run the upstream processor's code for removing the '>'
        return super().clean(line)
class BugdownUListPreprocessor(markdown.preprocessors.Preprocessor):
    """ Allows unordered list blocks that come directly after a
    paragraph to be rendered as an unordered list

    Detects paragraphs that have a matching list item that comes
    directly after a line of text, and inserts a newline between
    to satisfy Markdown"""

    LI_RE = re.compile('^[ ]{0,3}[*][ ]+(.*)', re.MULTILINE)
    HANGING_ULIST_RE = re.compile('^.+\\n([ ]{0,3}[*][ ]+.*)', re.MULTILINE)

    def run(self, lines: List[str]) -> List[str]:
        """ Insert a newline between a paragraph and ulist if missing """
        inserts = 0
        fence = None
        # Work on a copy so indices into `lines` stay valid while we insert.
        copy = lines[:]
        for i in range(len(lines) - 1):
            # Ignore anything that is inside a fenced code block
            m = FENCE_RE.match(lines[i])
            if not fence and m:
                fence = m.group('fence')
            elif fence and m and fence == m.group('fence'):
                fence = None

            # If we're not in a fenced block and we detect an upcoming list
            # hanging off a paragraph, add a newline
            if (not fence and lines[i] and
                    self.LI_RE.match(lines[i+1]) and
                    not self.LI_RE.match(lines[i])):
                # `inserts` tracks how far `copy` has drifted from `lines`.
                copy.insert(i+inserts+1, '')
                inserts += 1
        return copy
class AutoNumberOListPreprocessor(markdown.preprocessors.Preprocessor):
    """ Finds a sequence of lines numbered by the same number"""
    RE = re.compile(r'^([ ]*)(\d+)\.[ ]+(.*)')
    TAB_LENGTH = 2

    def run(self, lines: List[str]) -> List[str]:
        """Rewrite runs of identically-numbered list items (e.g. "1. / 1. / 1.")
        into sequential numbering, leaving explicitly-numbered lists alone."""
        new_lines = []  # type: List[str]
        current_list = []  # type: List[Match[str]]
        current_indent = 0

        for line in lines:
            m = self.RE.match(line)

            # Remember if this line is a continuation of already started list
            is_next_item = (m and current_list
                            and current_indent == len(m.group(1)) // self.TAB_LENGTH)
            if not is_next_item:
                # There is no more items in the list we were processing
                new_lines.extend(self.renumber(current_list))
                current_list = []

            if not m:
                # Ordinary line
                new_lines.append(line)
            elif is_next_item:
                # Another list item
                current_list.append(m)
            else:
                # First list item
                current_list = [m]
                current_indent = len(m.group(1)) // self.TAB_LENGTH

        # Flush any list still open at end-of-input.
        new_lines.extend(self.renumber(current_list))
        return new_lines

    def renumber(self, mlist: List[Match[str]]) -> List[str]:
        """Render the collected items back to text, renumbering sequentially
        only when every item carried the same starting number."""
        if not mlist:
            return []

        start_number = int(mlist[0].group(2))

        # Change numbers only if every one is the same
        change_numbers = True
        for m in mlist:
            if int(m.group(2)) != start_number:
                change_numbers = False
                break

        lines = []  # type: List[str]
        counter = start_number

        for m in mlist:
            number = str(counter) if change_numbers else m.group(2)
            lines.append('%s%s. %s' % (m.group(1), number, m.group(3)))
            counter += 1

        return lines
# We need the following since upgrade from py-markdown 2.6.11 to 3.0.1
# modifies the link handling significantly. The following is taken from
# py-markdown 2.6.11 markdown/inlinepatterns.py.
@one_time
def get_link_re() -> Pattern:
    '''
    Return the compiled regex for [text](url) style links.

    Very important--if you need to change this code to depend on
    any arguments, you must eliminate the "one_time" decorator
    and consider performance implications. We only want to compute
    this value once.
    '''
    # NOTE(review): the repeated-group construction of BRK and the nested
    # quantifiers in LINK_RE are prone to catastrophic backtracking
    # (ReDoS) on adversarial input; rendering is currently bounded only by
    # the 5-second timeout in do_convert. Worth hardening upstream.
    NOBRACKET = r'[^\]\[]*'
    # Up to 6 levels of balanced [ ] nesting inside the link text.
    BRK = (
        r'\[(' +
        (NOBRACKET + r'(\[')*6 +
        (NOBRACKET + r'\])*')*6 +
        NOBRACKET + r')\]'
    )
    # Reject matches preceded by '!' (those are image syntax).
    NOIMG = r'(?<!\!)'

    # [text](url) or [text](<url>) or [text](url "title")
    LINK_RE = NOIMG + BRK + \
        r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
    return normal_compile(LINK_RE)
def prepare_realm_pattern(source: str) -> str:
    """Wrap a realm filter regex so it only matches in linkable positions.

    The returned pattern requires the match to start after start-of-string,
    whitespace, or an opening delimiter, forbids a word character
    immediately after, and captures the matched text as the named group
    "name".
    """
    prefix = r"""(?<![^\s'"\(,:<])(?P<name>"""
    suffix = r')(?!\w)'
    return prefix + source + suffix
# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
class RealmFilterPattern(markdown.inlinepatterns.Pattern):
    """ Applied a given realm filter to the input """

    def __init__(self, source_pattern: str,
                 format_string: str,
                 markdown_instance: Optional[markdown.Markdown]=None) -> None:
        # Restrict the raw filter regex to linkable positions and add the
        # "name" capture group used by the format string below.
        self.pattern = prepare_realm_pattern(source_pattern)
        self.format_string = format_string
        markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)

    def handleMatch(self, m: Match[str]) -> Union[Element, str]:
        db_data = self.markdown.zulip_db_data
        # Interpolate all named groups into the URL template; the matched
        # text itself becomes the link label.
        return url_to_a(db_data,
                        self.format_string % m.groupdict(),
                        m.group("name"))
class UserMentionPattern(markdown.inlinepatterns.Pattern):
    """Render @**name**, @**name|id**, and silent @_**name** user mentions.

    Successful matches record the mentioned user's ID (or the wildcard
    flag) on the message being rendered, unless the mention is silent.
    """
    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        match = m.group('match')
        # A leading '_' marks a silent mention: rendered as a mention, but
        # not recorded as notifying the user.
        silent = m.group('silent') == '_'

        db_data = self.markdown.zulip_db_data
        if self.markdown.zulip_message and db_data is not None:
            if match.startswith("**") and match.endswith("**"):
                name = match[2:-2]
            else:
                return None
            wildcard = mention.user_mention_matches_wildcard(name)

            # "name|<id>" syntax disambiguates users who share a full name.
            id_syntax_match = re.match(r'.+\|(?P<user_id>\d+)$', name)
            if id_syntax_match:
                id = id_syntax_match.group("user_id")
                user = db_data['mention_data'].get_user_by_id(id)
            else:
                user = db_data['mention_data'].get_user_by_name(name)

            if wildcard:
                self.markdown.zulip_message.mentions_wildcard = True
                user_id = "*"
            elif user:
                if not silent:
                    self.markdown.zulip_message.mentions_user_ids.add(user['id'])
                # Normalize to the canonical full name from the database.
                name = user['full_name']
                user_id = str(user['id'])
            else:
                # Don't highlight @mentions that don't refer to a valid user
                return None

            el = markdown.util.etree.Element("span")
            el.set('data-user-id', user_id)
            if silent:
                el.set('class', 'user-mention silent')
                el.text = "%s" % (name,)
            else:
                el.set('class', 'user-mention')
                el.text = "@%s" % (name,)
            return el
        return None
class UserGroupMentionPattern(markdown.inlinepatterns.Pattern):
    """Render @*group-name* user-group mentions and record the group ID
    on the message being rendered."""
    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        match = m.group(2)
        db_data = self.markdown.zulip_db_data

        if self.markdown.zulip_message and db_data is not None:
            name = extract_user_group(match)
            user_group = db_data['mention_data'].get_user_group(name)
            if user_group:
                self.markdown.zulip_message.mentions_user_group_ids.add(user_group.id)
                # Normalize to the canonical group name from the database.
                name = user_group.name
                user_group_id = str(user_group.id)
            else:
                # Don't highlight @-mentions that don't refer to a valid user
                # group.
                return None

            el = markdown.util.etree.Element("span")
            el.set('class', 'user-group-mention')
            el.set('data-user-group-id', user_group_id)
            el.text = "@%s" % (name,)
            return el
        return None
class StreamPattern(CompiledPattern):
    """Render #**stream-name** links as <a> elements pointing at the
    stream's narrow, for streams that exist in the realm."""
    def find_stream_by_name(self, name: Match[str]) -> Optional[Dict[str, Any]]:
        # Look the name up in the prefetched stream data; returns None when
        # no message is being rendered or the stream doesn't exist.
        db_data = self.markdown.zulip_db_data
        if db_data is None:
            return None
        stream = db_data['stream_names'].get(name)
        return stream

    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        name = m.group('stream_name')

        if self.markdown.zulip_message:
            stream = self.find_stream_by_name(name)
            if stream is None:
                return None
            el = markdown.util.etree.Element('a')
            el.set('class', 'stream')
            el.set('data-stream-id', str(stream['id']))
            # TODO: We should quite possibly not be specifying the
            # href here and instead having the browser auto-add the
            # href when it processes a message with one of these, to
            # provide more clarity to API clients.
            stream_url = encode_stream(stream['id'], name)
            el.set('href', '/#narrow/stream/{stream_url}'.format(stream_url=stream_url))
            el.text = '#{stream_name}'.format(stream_name=name)
            return el
        return None
def possible_linked_stream_names(content: str) -> Set[str]:
    """Return the distinct stream names captured by STREAM_LINK_REGEX in
    *content* (used to prefetch stream data before rendering)."""
    return {name for name in re.findall(STREAM_LINK_REGEX, content, re.VERBOSE)}
class AlertWordsNotificationProcessor(markdown.preprocessors.Preprocessor):
    """Scan the raw message for configured alert words and record the hits
    on the message; produces no rendering changes of its own."""
    def run(self, lines: Iterable[str]) -> Iterable[str]:
        db_data = self.markdown.zulip_db_data
        if self.markdown.zulip_message and db_data is not None:
            # We check for alert words here, the set of which are
            # dependent on which users may see this message.
            #
            # Our caller passes in the list of possible_words. We
            # don't do any special rendering; we just append the alert words
            # we find to the set self.markdown.zulip_message.alert_words.
            realm_words = db_data['possible_words']

            content = '\n'.join(lines).lower()

            # An alert word only counts when bounded by whitespace,
            # string boundaries, or common punctuation on both sides.
            allowed_before_punctuation = "|".join([r'\s', '^', r'[\(\".,\';\[\*`>]'])
            allowed_after_punctuation = "|".join([r'\s', '$', r'[\)\"\?:.,\';\]!\*`]'])

            for word in realm_words:
                escaped = re.escape(word.lower())
                match_re = re.compile('(?:%s)%s(?:%s)' %
                                      (allowed_before_punctuation,
                                       escaped,
                                       allowed_after_punctuation))
                if re.search(match_re, content):
                    self.markdown.zulip_message.alert_words.add(word)

        return lines
# This prevents realm_filters from running on the content of a
# Markdown link, breaking up the link. This is a monkey-patch, but it
# might be worth sending a version of this change upstream.
class AtomicLinkPattern(CompiledPattern):
    """Handle [text](url) links, marking the link text atomic so realm
    filters cannot re-match inside it."""
    def get_element(self, m: Match[str]) -> Optional[Element]:
        # Group 9 is the URL portion of the LINK_RE regex from get_link_re.
        href = m.group(9)
        if not href:
            return None

        if href[0] == "<":
            # Strip the optional <...> wrapper around the URL.
            href = href[1:-1]
        href = sanitize_url(self.unescape(href.strip()))
        if href is None:
            return None

        db_data = self.markdown.zulip_db_data
        href = rewrite_local_links_to_relative(db_data, href)

        el = markdown.util.etree.Element('a')
        el.text = m.group(2)
        el.set('href', href)
        # In-app '#' fragment links open in the same tab.
        fixup_link(el, target_blank=(href[:1] != '#'))
        return el

    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        ret = self.get_element(m)
        if ret is None:
            return None
        if not isinstance(ret, str):
            # AtomicString prevents further inline processing of the text.
            ret.text = markdown.util.AtomicString(ret.text)
        return ret
def get_sub_registry(r: markdown.util.Registry, keys: List[str]) -> markdown.util.Registry:
    """Build a fresh Registry containing only the named items from *r*,
    each kept at its original priority.

    Registry (new in py-markdown 3) replaced OrderedDict and has no
    .keys()/deletion API, so copying the wanted entries into a new object
    is simpler than removing the unwanted ones in place.
    """
    subset = markdown.util.Registry()
    for name in keys:
        subset.register(r[name], name, r.get_index_for_name(name))
    return subset
# These are used as keys ("realm_filters_keys") to md_engines and the respective
# realm filter caches
DEFAULT_BUGDOWN_KEY = -1  # fallback engine with no realm-specific filters
ZEPHYR_MIRROR_BUGDOWN_KEY = -2  # restricted engine for mirrored zephyr traffic
class Bugdown(markdown.Markdown):
    """Zulip's customized Markdown engine.

    Replaces most of python-markdown's default processor registries with
    Zulip-specific ones (mentions, stream links, realm filters, custom
    list handling, etc.), configured per realm via kwargs:
    ``realm_filters``, ``realm`` (realm_filters_key), and
    ``code_block_processor_disabled`` (for the email gateway).
    """
    def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None:
        # define default configs
        self.config = {
            "realm_filters": [kwargs['realm_filters'],
                              "Realm-specific filters for realm_filters_key %s" % (kwargs['realm'],)],
            "realm": [kwargs['realm'], "Realm id"],
            "code_block_processor_disabled": [kwargs['code_block_processor_disabled'],
                                              "Disabled for email gateway"]
        }

        super().__init__(*args, **kwargs)
        self.set_output_format('html')

    def build_parser(self) -> markdown.Markdown:
        # Build the parser using selected default features from py-markdown.
        # The complete list of all available processors can be found in the
        # super().build_parser() function.
        #
        # Note: for any py-markdown updates, manually check if we want any
        # of the new features added upstream or not; they wouldn't get
        # included by default.
        self.preprocessors = self.build_preprocessors()
        self.parser = self.build_block_parser()
        self.inlinePatterns = self.build_inlinepatterns()
        self.treeprocessors = self.build_treeprocessors()
        self.postprocessors = self.build_postprocessors()
        self.handle_zephyr_mirror()
        return self

    def build_preprocessors(self) -> markdown.util.Registry:
        # We disable the following preprocessors from upstream:
        #
        # html_block - insecure
        # reference - references don't make sense in a chat context.
        preprocessors = markdown.util.Registry()
        preprocessors.register(AutoNumberOListPreprocessor(self), 'auto_number_olist', 40)
        preprocessors.register(BugdownUListPreprocessor(self), 'hanging_ulists', 35)
        preprocessors.register(markdown.preprocessors.NormalizeWhitespace(self), 'normalize_whitespace', 30)
        preprocessors.register(fenced_code.FencedBlockPreprocessor(self), 'fenced_code_block', 25)
        preprocessors.register(AlertWordsNotificationProcessor(self), 'custom_text_notifications', 20)
        return preprocessors

    def build_block_parser(self) -> markdown.util.Registry:
        # We disable the following blockparsers from upstream:
        #
        # indent - replaced by ours
        # hashheader - disabled, since headers look bad and don't make sense in a chat context.
        # setextheader - disabled, since headers look bad and don't make sense in a chat context.
        # olist - replaced by ours
        # ulist - replaced by ours
        # quote - replaced by ours
        parser = markdown.blockprocessors.BlockParser(self)
        parser.blockprocessors.register(markdown.blockprocessors.EmptyBlockProcessor(parser), 'empty', 85)
        if not self.getConfig('code_block_processor_disabled'):
            parser.blockprocessors.register(markdown.blockprocessors.CodeBlockProcessor(parser), 'code', 80)
        # We get priority 75 from 'table' extension
        parser.blockprocessors.register(markdown.blockprocessors.HRProcessor(parser), 'hr', 70)
        parser.blockprocessors.register(UListProcessor(parser), 'ulist', 65)
        parser.blockprocessors.register(ListIndentProcessor(parser), 'indent', 60)
        parser.blockprocessors.register(BlockQuoteProcessor(parser), 'quote', 55)
        parser.blockprocessors.register(markdown.blockprocessors.ParagraphProcessor(parser), 'paragraph', 50)
        return parser

    def build_inlinepatterns(self) -> markdown.util.Registry:
        # We disable the following upstream inline patterns:
        #
        # backtick - replaced by ours
        # escape - probably will re-add at some point.
        # link - replaced by ours
        # image_link - replaced by ours
        # autolink - replaced by ours
        # automail - replaced by ours
        # linebreak - we use nl2br and consider that good enough
        # html - insecure
        # reference - references not useful
        # image_reference - references not useful
        # short_reference - references not useful
        # ---------------------------------------------------
        # strong_em - for these three patterns,
        # strong2 - we have our own versions where
        # emphasis2 - we disable _ for bold and emphasis

        # Declare regexes for clean single line calls to .register().
        NOT_STRONG_RE = markdown.inlinepatterns.NOT_STRONG_RE
        # Custom strikethrough syntax: ~~foo~~
        DEL_RE = r'(?<!~)(\~\~)([^~\n]+?)(\~\~)(?!~)'
        # Custom bold syntax: **foo** but not __foo__
        # str inside ** must start and end with a word character
        # it need for things like "const char *x = (char *)y"
        EMPHASIS_RE = r'(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*'
        ENTITY_RE = markdown.inlinepatterns.ENTITY_RE
        STRONG_EM_RE = r'(\*\*\*)(?!\s+)([^\*^\n]+)(?<!\s)\*\*\*'
        # Inline code block without whitespace stripping
        BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\3(?!`))'

        # Add Inline Patterns. We use a custom numbering of the
        # rules, that preserves the order from upstream but leaves
        # space for us to add our own.
        reg = markdown.util.Registry()
        reg.register(BacktickPattern(BACKTICK_RE), 'backtick', 105)
        reg.register(markdown.inlinepatterns.DoubleTagPattern(STRONG_EM_RE, 'strong,em'), 'strong_em', 100)
        reg.register(UserMentionPattern(mention.find_mentions, self), 'usermention', 95)
        reg.register(Tex(r'\B(?<!\$)\$\$(?P<body>[^\n_$](\\\$|[^$\n])*)\$\$(?!\$)\B'), 'tex', 90)
        reg.register(StreamPattern(get_compiled_stream_link_regex(), self), 'stream', 85)
        reg.register(Avatar(AVATAR_REGEX, self), 'avatar', 80)
        reg.register(ModalLink(r'!modal_link\((?P<relative_url>[^)]*), (?P<text>[^)]*)\)'), 'modal_link', 75)
        # Note that !gravatar syntax should be deprecated long term.
        reg.register(Avatar(GRAVATAR_REGEX, self), 'gravatar', 70)
        reg.register(UserGroupMentionPattern(mention.user_group_mentions, self), 'usergroupmention', 65)
        reg.register(AtomicLinkPattern(get_link_re(), self), 'link', 60)
        reg.register(AutoLink(get_web_link_regex(), self), 'autolink', 55)
        # Reserve priority 45-54 for Realm Filters
        reg = self.register_realm_filters(reg)
        reg.register(markdown.inlinepatterns.HtmlInlineProcessor(ENTITY_RE, self), 'entity', 40)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'), 'strong', 35)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(EMPHASIS_RE, 'em'), 'emphasis', 30)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(DEL_RE, 'del'), 'del', 25)
        reg.register(markdown.inlinepatterns.SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 20)
        reg.register(Emoji(EMOJI_REGEX, self), 'emoji', 15)
        reg.register(EmoticonTranslation(emoticon_regex, self), 'translate_emoticons', 10)
        # We get priority 5 from 'nl2br' extension
        reg.register(UnicodeEmoji(unicode_emoji_regex), 'unicodeemoji', 0)
        return reg

    def register_realm_filters(self, inlinePatterns: markdown.util.Registry) -> markdown.util.Registry:
        # Add one pattern per configured realm filter, all at priority 45.
        for (pattern, format_string, id) in self.getConfig("realm_filters"):
            inlinePatterns.register(RealmFilterPattern(pattern, format_string, self),
                                    'realm_filters/%s' % (pattern), 45)
        return inlinePatterns

    def build_treeprocessors(self) -> markdown.util.Registry:
        # Here we build all the processors from upstream, plus a few of our own.
        treeprocessors = markdown.util.Registry()
        # We get priority 30 from 'hilite' extension
        treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
        treeprocessors.register(markdown.treeprocessors.PrettifyTreeprocessor(self), 'prettify', 20)
        treeprocessors.register(InlineInterestingLinkProcessor(self), 'inline_interesting_links', 15)
        if settings.CAMO_URI:
            treeprocessors.register(InlineHttpsProcessor(self), 'rewrite_to_https', 10)
        return treeprocessors

    def build_postprocessors(self) -> markdown.util.Registry:
        # These are the default python-markdown processors, unmodified.
        postprocessors = markdown.util.Registry()
        postprocessors.register(markdown.postprocessors.RawHtmlPostprocessor(self), 'raw_html', 20)
        postprocessors.register(markdown.postprocessors.AndSubstitutePostprocessor(), 'amp_substitute', 15)
        postprocessors.register(markdown.postprocessors.UnescapePostprocessor(), 'unescape', 10)
        return postprocessors

    def getConfig(self, key: str, default: str='') -> Any:
        """ Return a setting for the given key or an empty string. """
        if key in self.config:
            return self.config[key][0]
        else:
            return default

    def handle_zephyr_mirror(self) -> None:
        if self.getConfig("realm") == ZEPHYR_MIRROR_BUGDOWN_KEY:
            # Disable almost all inline patterns for zephyr mirror
            # users' traffic that is mirrored. Note that
            # inline_interesting_links is a treeprocessor and thus is
            # not removed
            self.inlinePatterns = get_sub_registry(self.inlinePatterns, ['autolink'])
            self.treeprocessors = get_sub_registry(self.treeprocessors, ['inline_interesting_links',
                                                                         'rewrite_to_https'])
            # insert new 'inline' processor because we have changed self.inlinePatterns
            # but InlineProcessor copies md as self.md in __init__.
            self.treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
            self.preprocessors = get_sub_registry(self.preprocessors, ['custom_text_notifications'])
            self.parser.blockprocessors = get_sub_registry(self.parser.blockprocessors, ['paragraph'])
# Cache of constructed engines, keyed by (realm_filters_key, email_gateway).
md_engines = {}  # type: Dict[Tuple[int, bool], markdown.Markdown]
# Realm filters used to build each cached engine, for change detection.
realm_filter_data = {}  # type: Dict[int, List[Tuple[str, str, int]]]
def make_md_engine(realm_filters_key: int, email_gateway: bool) -> None:
    """(Re)build and cache the Markdown engine for this cache key.

    Any previously cached engine for the key is discarded first; the
    realm's filters must already be present in realm_filter_data.
    """
    key = (realm_filters_key, email_gateway)
    md_engines.pop(key, None)
    md_engines[key] = build_engine(
        realm_filters=realm_filter_data[realm_filters_key],
        realm_filters_key=realm_filters_key,
        email_gateway=email_gateway,
    )
def build_engine(realm_filters: List[Tuple[str, str, int]],
                 realm_filters_key: int,
                 email_gateway: bool) -> markdown.Markdown:
    """Construct a Bugdown engine wired up with our standard extensions
    (nl2br, tables, codehilite) for the given realm's filters."""
    extensions = [
        nl2br.makeExtension(),
        tables.makeExtension(),
        codehilite.makeExtension(linenums=False,
                                 guess_lang=False),
    ]
    return Bugdown(realm_filters=realm_filters,
                   realm=realm_filters_key,
                   code_block_processor_disabled=email_gateway,
                   extensions=extensions)
def topic_links(realm_filters_key: int, topic_name: str) -> List[str]:
    """Return the URLs produced by applying this realm's filters to a
    topic name, in match order."""
    urls = []  # type: List[str]
    for realm_filter in realm_filters_for_realm(realm_filters_key):
        pattern = prepare_realm_pattern(realm_filter[0])
        urls.extend(realm_filter[1] % m.groupdict()
                    for m in re.finditer(pattern, topic_name))
    return urls
def maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gateway: bool) -> None:
    """Ensure the cached Markdown engine(s) reflect the current realm filters.

    With realm_filters_key=None, rebuilds engines for every realm (plus the
    default and zephyr-mirror engines); otherwise refreshes only the named
    realm's engines, and only if its filters changed or no engine exists yet.
    """
    # If realm_filters_key is None, load all filters
    global realm_filter_data
    if realm_filters_key is None:
        all_filters = all_realm_filters()
        all_filters[DEFAULT_BUGDOWN_KEY] = []
        for realm_filters_key, filters in all_filters.items():
            realm_filter_data[realm_filters_key] = filters
            make_md_engine(realm_filters_key, email_gateway)
        # Hack to ensure that getConfig("realm") is right for mirrored Zephyrs
        realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = []
        make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False)
    else:
        realm_filters = realm_filters_for_realm(realm_filters_key)
        if realm_filters_key not in realm_filter_data or \
                realm_filter_data[realm_filters_key] != realm_filters:
            # Realm filters data has changed, update `realm_filter_data` and any
            # of the existing markdown engines using this set of realm filters.
            realm_filter_data[realm_filters_key] = realm_filters
            for email_gateway_flag in [True, False]:
                if (realm_filters_key, email_gateway_flag) in md_engines:
                    # Update only existing engines(if any), don't create new one.
                    make_md_engine(realm_filters_key, email_gateway_flag)

        if (realm_filters_key, email_gateway) not in md_engines:
            # Markdown engine corresponding to this key doesn't exists so create one.
            make_md_engine(realm_filters_key, email_gateway)
# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons. The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
_privacy_re = re.compile('\\w', flags=re.UNICODE)

def privacy_clean_markdown(content: str) -> str:
    """Return a loggable version of *content*: every word character is
    replaced by 'x' and the result is repr()-escaped."""
    masked = _privacy_re.sub('x', content)
    return repr(masked)
def log_bugdown_error(msg: str) -> None:
    """We use this unusual logging approach to log the bugdown error, in
    order to prevent AdminNotifyHandler from sending the santized
    original markdown formatting into another Zulip message, which
    could cause an infinite exception loop."""
    bugdown_logger.error(msg)
def get_email_info(realm_id: int, emails: Set[str]) -> Dict[str, FullNameInfo]:
    """Map normalized (stripped, lowercased) email address to its
    {'id', 'email'} row for users in the given realm.

    Lookup is case-insensitive; returns {} when no emails are given.
    """
    if not emails:
        return dict()

    conditions = {
        Q(email__iexact=email.strip().lower())
        for email in emails
    }
    rows = UserProfile.objects.filter(
        realm_id=realm_id
    ).filter(
        functools.reduce(lambda a, b: a | b, conditions),
    ).values('id', 'email')

    return {row['email'].strip().lower(): row for row in rows}
def get_possible_mentions_info(realm_id: int, mention_texts: Set[str]) -> List[FullNameInfo]:
    """Fetch id/full_name/email rows for active realm users whose full name
    (case-insensitively) matches any of the mention texts."""
    if not mention_texts:
        return list()

    # Remove the trailing part of the `name|id` mention syntax,
    # thus storing only full names in full_names.
    full_names = set()
    name_re = r'(?P<full_name>.+)\|\d+$'
    for mention_text in mention_texts:
        name_syntax_match = re.match(name_re, mention_text)
        if name_syntax_match:
            full_names.add(name_syntax_match.group("full_name"))
        else:
            full_names.add(mention_text)

    q_list = {
        Q(full_name__iexact=full_name)
        for full_name in full_names
    }

    rows = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True,
    ).filter(
        functools.reduce(lambda a, b: a | b, q_list),
    ).values(
        'id',
        'full_name',
        'email',
    )
    return list(rows)
class MentionData:
    """Prefetched user and user-group data for rendering mentions in one
    message, so the Markdown thread needn't hit the database."""
    def __init__(self, realm_id: int, content: str) -> None:
        mention_texts = possible_mentions(content)
        possible_mentions_info = get_possible_mentions_info(realm_id, mention_texts)
        # Index the fetched rows both by lowercased full name and by id.
        self.full_name_info = {
            row['full_name'].lower(): row
            for row in possible_mentions_info
        }
        self.user_id_info = {
            row['id']: row
            for row in possible_mentions_info
        }
        self.init_user_group_data(realm_id=realm_id, content=content)

    def init_user_group_data(self,
                             realm_id: int,
                             content: str) -> None:
        # Prefetch user groups mentioned in the content and their members.
        user_group_names = possible_user_group_mentions(content)
        self.user_group_name_info = get_user_group_name_info(realm_id, user_group_names)
        self.user_group_members = defaultdict(list)  # type: Dict[int, List[int]]
        group_ids = [group.id for group in self.user_group_name_info.values()]

        if not group_ids:
            # Early-return to avoid the cost of hitting the ORM,
            # which shows up in profiles.
            return

        membership = UserGroupMembership.objects.filter(user_group_id__in=group_ids)
        for info in membership.values('user_group_id', 'user_profile_id'):
            group_id = info['user_group_id']
            user_profile_id = info['user_profile_id']
            self.user_group_members[group_id].append(user_profile_id)

    def get_user_by_name(self, name: str) -> Optional[FullNameInfo]:
        # warning: get_user_by_name is not dependable if two
        # users of the same full name are mentioned. Use
        # get_user_by_id where possible.
        return self.full_name_info.get(name.lower(), None)

    def get_user_by_id(self, id: str) -> Optional[FullNameInfo]:
        return self.user_id_info.get(int(id), None)

    def get_user_ids(self) -> Set[int]:
        """
        Returns the user IDs that might have been mentioned by this
        content.  Note that because this data structure has not parsed
        the message and does not know about escaping/code blocks, this
        will overestimate the list of user ids.
        """
        return set(self.user_id_info.keys())

    def get_user_group(self, name: str) -> Optional[UserGroup]:
        return self.user_group_name_info.get(name.lower(), None)

    def get_group_members(self, user_group_id: int) -> List[int]:
        return self.user_group_members.get(user_group_id, [])
def get_user_group_name_info(realm_id: int, user_group_names: Set[str]) -> Dict[str, UserGroup]:
    """Map lowercased group name to its UserGroup row for the given names
    in this realm; returns {} when no names are given."""
    if not user_group_names:
        return dict()
    groups = UserGroup.objects.filter(realm_id=realm_id,
                                      name__in=user_group_names)
    return {group.name.lower(): group for group in groups}
def get_stream_name_info(realm: Realm, stream_names: Set[str]) -> Dict[str, FullNameInfo]:
    """Map stream name (exact, case-sensitive match) to its {'id', 'name'}
    row among the realm's active streams."""
    if not stream_names:
        return dict()

    q_list = {
        Q(name=name)
        for name in stream_names
    }

    rows = get_active_streams(
        realm=realm,
    ).filter(
        functools.reduce(lambda a, b: a | b, q_list),
    ).values(
        'id',
        'name',
    )

    dct = {
        row['name']: row
        for row in rows
    }
    return dct
def do_convert(content: str,
               message: Optional[Message]=None,
               message_realm: Optional[Realm]=None,
               possible_words: Optional[Set[str]]=None,
               sent_by_bot: Optional[bool]=False,
               translate_emoticons: Optional[bool]=False,
               mention_data: Optional[MentionData]=None,
               email_gateway: Optional[bool]=False,
               no_previews: Optional[bool]=False) -> str:
    """Convert Markdown to HTML, with Zulip-specific settings and hacks.

    Selects (and lazily builds) the per-realm engine, attaches the
    prefetched DB data the inline patterns need, renders under a 5-second
    timeout, and raises BugdownRenderingException on failure or oversized
    output.
    """
    # This logic is a bit convoluted, but the overall goal is to support a range of use cases:
    # * Nothing is passed in other than content -> just run default options (e.g. for docs)
    # * message is passed, but no realm is -> look up realm from message
    # * message_realm is passed -> use that realm for bugdown purposes
    if message is not None:
        if message_realm is None:
            message_realm = message.get_realm()
    if message_realm is None:
        realm_filters_key = DEFAULT_BUGDOWN_KEY
    else:
        realm_filters_key = message_realm.id

    if message and hasattr(message, 'id') and message.id:
        logging_message_id = 'id# ' + str(message.id)
    else:
        logging_message_id = 'unknown'

    if message is not None and message_realm is not None:
        if message_realm.is_zephyr_mirror_realm:
            if message.sending_client.name == "zephyr_mirror":
                # Use slightly customized Markdown processor for content
                # delivered via zephyr_mirror
                realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY

    maybe_update_markdown_engines(realm_filters_key, email_gateway)
    md_engine_key = (realm_filters_key, email_gateway)

    if md_engine_key in md_engines:
        _md_engine = md_engines[md_engine_key]
    else:
        # Fall back to the default engine, building it first if needed.
        if DEFAULT_BUGDOWN_KEY not in md_engines:
            maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False)

        _md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)]
    # Reset the parser; otherwise it will get slower over time.
    _md_engine.reset()

    # Filters such as UserMentionPattern need a message.
    _md_engine.zulip_message = message
    _md_engine.zulip_realm = message_realm
    _md_engine.zulip_db_data = None  # for now
    _md_engine.image_preview_enabled = image_preview_enabled(
        message, message_realm, no_previews)
    _md_engine.url_embed_preview_enabled = url_embed_preview_enabled(
        message, message_realm, no_previews)

    # Pre-fetch data from the DB that is used in the bugdown thread
    if message is not None:
        assert message_realm is not None  # ensured above if message is not None
        if possible_words is None:
            possible_words = set()  # Set[str]

        # Here we fetch the data structures needed to render
        # mentions/avatars/stream mentions from the database, but only
        # if there is syntax in the message that might use them, since
        # the fetches are somewhat expensive and these types of syntax
        # are uncommon enough that it's a useful optimization.

        if mention_data is None:
            mention_data = MentionData(message_realm.id, content)

        emails = possible_avatar_emails(content)
        email_info = get_email_info(message_realm.id, emails)

        stream_names = possible_linked_stream_names(content)
        stream_name_info = get_stream_name_info(message_realm, stream_names)

        if content_has_emoji_syntax(content):
            active_realm_emoji = message_realm.get_active_emoji()
        else:
            active_realm_emoji = dict()

        _md_engine.zulip_db_data = {
            'possible_words': possible_words,
            'email_info': email_info,
            'mention_data': mention_data,
            'active_realm_emoji': active_realm_emoji,
            'realm_uri': message_realm.uri,
            'sent_by_bot': sent_by_bot,
            'stream_names': stream_name_info,
            'translate_emoticons': translate_emoticons,
        }

    try:
        # Spend at most 5 seconds rendering; this protects the backend
        # from being overloaded by bugs (e.g. markdown logic that is
        # extremely inefficient in corner cases) as well as user
        # errors (e.g. a realm filter that makes some syntax
        # infinite-loop).
        rendered_content = timeout(5, _md_engine.convert, content)

        # Throw an exception if the content is huge; this protects the
        # rest of the codebase from any bugs where we end up rendering
        # something huge.
        if len(rendered_content) > MAX_MESSAGE_LENGTH * 10:
            raise BugdownRenderingException('Rendered content exceeds %s characters (message %s)' %
                                            (MAX_MESSAGE_LENGTH * 10, logging_message_id))
        return rendered_content
    except Exception:
        cleaned = privacy_clean_markdown(content)
        # NOTE: Don't change this message without also changing the
        # logic in logging_handlers.py or we can create recursive
        # exceptions.
        exception_message = ('Exception in Markdown parser: %sInput (sanitized) was: %s\n (message %s)'
                             % (traceback.format_exc(), cleaned, logging_message_id))
        bugdown_logger.exception(exception_message)

        raise BugdownRenderingException()
    finally:
        # These next three lines are slightly paranoid, since
        # we always set these right before actually using the
        # engine, but better safe then sorry.
        _md_engine.zulip_message = None
        _md_engine.zulip_realm = None
        _md_engine.zulip_db_data = None
# Simple wall-clock instrumentation for Markdown rendering.
bugdown_time_start = 0.0  # start time of the in-progress render
bugdown_total_time = 0.0  # cumulative seconds spent rendering
bugdown_total_requests = 0  # number of completed renders

def get_bugdown_time() -> float:
    """Return total seconds spent rendering Markdown in this process."""
    return bugdown_total_time

def get_bugdown_requests() -> int:
    """Return the number of Markdown renders completed in this process."""
    return bugdown_total_requests

def bugdown_stats_start() -> None:
    """Record the start time of a render; paired with bugdown_stats_finish."""
    global bugdown_time_start
    bugdown_time_start = time.time()

def bugdown_stats_finish() -> None:
    """Accumulate the elapsed time since bugdown_stats_start into the totals."""
    global bugdown_total_time
    global bugdown_total_requests
    global bugdown_time_start
    bugdown_total_requests += 1
    bugdown_total_time += (time.time() - bugdown_time_start)
def convert(content: str,
            message: Optional[Message]=None,
            message_realm: Optional[Realm]=None,
            possible_words: Optional[Set[str]]=None,
            sent_by_bot: bool=False,
            translate_emoticons: bool=False,
            mention_data: Optional[MentionData]=None,
            email_gateway: bool=False,
            no_previews: bool=False) -> str:
    """Render ``content`` as Zulip Markdown, wrapped in timing stats.

    Thin wrapper around ``do_convert`` that brackets the call with
    ``bugdown_stats_start``/``bugdown_stats_finish`` so the module-level
    counters stay accurate.  All arguments are forwarded unchanged.
    """
    bugdown_stats_start()
    ret = do_convert(content, message, message_realm,
                     possible_words, sent_by_bot, translate_emoticons,
                     mention_data, email_gateway, no_previews=no_previews)
    bugdown_stats_finish()
    return ret
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_1090_0 |
crossvul-python_data_bad_1886_0 | """
SVG colors.
"""
import re
COLORS = {
'aliceblue': (240 / 255, 248 / 255, 255 / 255, 1),
'antiquewhite': (250 / 255, 235 / 255, 215 / 255, 1),
'aqua': (0 / 255, 255 / 255, 255 / 255, 1),
'aquamarine': (127 / 255, 255 / 255, 212 / 255, 1),
'azure': (240 / 255, 255 / 255, 255 / 255, 1),
'beige': (245 / 255, 245 / 255, 220 / 255, 1),
'bisque': (255 / 255, 228 / 255, 196 / 255, 1),
'black': (0 / 255, 0 / 255, 0 / 255, 1),
'blanchedalmond': (255 / 255, 235 / 255, 205 / 255, 1),
'blue': (0 / 255, 0 / 255, 255 / 255, 1),
'blueviolet': (138 / 255, 43 / 255, 226 / 255, 1),
'brown': (165 / 255, 42 / 255, 42 / 255, 1),
'burlywood': (222 / 255, 184 / 255, 135 / 255, 1),
'cadetblue': (95 / 255, 158 / 255, 160 / 255, 1),
'chartreuse': (127 / 255, 255 / 255, 0 / 255, 1),
'chocolate': (210 / 255, 105 / 255, 30 / 255, 1),
'coral': (255 / 255, 127 / 255, 80 / 255, 1),
'cornflowerblue': (100 / 255, 149 / 255, 237 / 255, 1),
'cornsilk': (255 / 255, 248 / 255, 220 / 255, 1),
'crimson': (220 / 255, 20 / 255, 60 / 255, 1),
'cyan': (0 / 255, 255 / 255, 255 / 255, 1),
'darkblue': (0 / 255, 0 / 255, 139 / 255, 1),
'darkcyan': (0 / 255, 139 / 255, 139 / 255, 1),
'darkgoldenrod': (184 / 255, 134 / 255, 11 / 255, 1),
'darkgray': (169 / 255, 169 / 255, 169 / 255, 1),
'darkgreen': (0 / 255, 100 / 255, 0 / 255, 1),
'darkgrey': (169 / 255, 169 / 255, 169 / 255, 1),
'darkkhaki': (189 / 255, 183 / 255, 107 / 255, 1),
'darkmagenta': (139 / 255, 0 / 255, 139 / 255, 1),
'darkolivegreen': (85 / 255, 107 / 255, 47 / 255, 1),
'darkorange': (255 / 255, 140 / 255, 0 / 255, 1),
'darkorchid': (153 / 255, 50 / 255, 204 / 255, 1),
'darkred': (139 / 255, 0 / 255, 0 / 255, 1),
'darksalmon': (233 / 255, 150 / 255, 122 / 255, 1),
'darkseagreen': (143 / 255, 188 / 255, 143 / 255, 1),
'darkslateblue': (72 / 255, 61 / 255, 139 / 255, 1),
'darkslategray': (47 / 255, 79 / 255, 79 / 255, 1),
'darkslategrey': (47 / 255, 79 / 255, 79 / 255, 1),
'darkturquoise': (0 / 255, 206 / 255, 209 / 255, 1),
'darkviolet': (148 / 255, 0 / 255, 211 / 255, 1),
'deeppink': (255 / 255, 20 / 255, 147 / 255, 1),
'deepskyblue': (0 / 255, 191 / 255, 255 / 255, 1),
'dimgray': (105 / 255, 105 / 255, 105 / 255, 1),
'dimgrey': (105 / 255, 105 / 255, 105 / 255, 1),
'dodgerblue': (30 / 255, 144 / 255, 255 / 255, 1),
'firebrick': (178 / 255, 34 / 255, 34 / 255, 1),
'floralwhite': (255 / 255, 250 / 255, 240 / 255, 1),
'forestgreen': (34 / 255, 139 / 255, 34 / 255, 1),
'fuchsia': (255 / 255, 0 / 255, 255 / 255, 1),
'gainsboro': (220 / 255, 220 / 255, 220 / 255, 1),
'ghostwhite': (248 / 255, 248 / 255, 255 / 255, 1),
'gold': (255 / 255, 215 / 255, 0 / 255, 1),
'goldenrod': (218 / 255, 165 / 255, 32 / 255, 1),
'gray': (128 / 255, 128 / 255, 128 / 255, 1),
'grey': (128 / 255, 128 / 255, 128 / 255, 1),
'green': (0 / 255, 128 / 255, 0 / 255, 1),
'greenyellow': (173 / 255, 255 / 255, 47 / 255, 1),
'honeydew': (240 / 255, 255 / 255, 240 / 255, 1),
'hotpink': (255 / 255, 105 / 255, 180 / 255, 1),
'indianred': (205 / 255, 92 / 255, 92 / 255, 1),
'indigo': (75 / 255, 0 / 255, 130 / 255, 1),
'ivory': (255 / 255, 255 / 255, 240 / 255, 1),
'khaki': (240 / 255, 230 / 255, 140 / 255, 1),
'lavender': (230 / 255, 230 / 255, 250 / 255, 1),
'lavenderblush': (255 / 255, 240 / 255, 245 / 255, 1),
'lawngreen': (124 / 255, 252 / 255, 0 / 255, 1),
'lemonchiffon': (255 / 255, 250 / 255, 205 / 255, 1),
'lightblue': (173 / 255, 216 / 255, 230 / 255, 1),
'lightcoral': (240 / 255, 128 / 255, 128 / 255, 1),
'lightcyan': (224 / 255, 255 / 255, 255 / 255, 1),
'lightgoldenrodyellow': (250 / 255, 250 / 255, 210 / 255, 1),
'lightgray': (211 / 255, 211 / 255, 211 / 255, 1),
'lightgreen': (144 / 255, 238 / 255, 144 / 255, 1),
'lightgrey': (211 / 255, 211 / 255, 211 / 255, 1),
'lightpink': (255 / 255, 182 / 255, 193 / 255, 1),
'lightsalmon': (255 / 255, 160 / 255, 122 / 255, 1),
'lightseagreen': (32 / 255, 178 / 255, 170 / 255, 1),
'lightskyblue': (135 / 255, 206 / 255, 250 / 255, 1),
'lightslategray': (119 / 255, 136 / 255, 153 / 255, 1),
'lightslategrey': (119 / 255, 136 / 255, 153 / 255, 1),
'lightsteelblue': (176 / 255, 196 / 255, 222 / 255, 1),
'lightyellow': (255 / 255, 255 / 255, 224 / 255, 1),
'lime': (0 / 255, 255 / 255, 0 / 255, 1),
'limegreen': (50 / 255, 205 / 255, 50 / 255, 1),
'linen': (250 / 255, 240 / 255, 230 / 255, 1),
'magenta': (255 / 255, 0 / 255, 255 / 255, 1),
'maroon': (128 / 255, 0 / 255, 0 / 255, 1),
'mediumaquamarine': (102 / 255, 205 / 255, 170 / 255, 1),
'mediumblue': (0 / 255, 0 / 255, 205 / 255, 1),
'mediumorchid': (186 / 255, 85 / 255, 211 / 255, 1),
'mediumpurple': (147 / 255, 112 / 255, 219 / 255, 1),
'mediumseagreen': (60 / 255, 179 / 255, 113 / 255, 1),
'mediumslateblue': (123 / 255, 104 / 255, 238 / 255, 1),
'mediumspringgreen': (0 / 255, 250 / 255, 154 / 255, 1),
'mediumturquoise': (72 / 255, 209 / 255, 204 / 255, 1),
'mediumvioletred': (199 / 255, 21 / 255, 133 / 255, 1),
'midnightblue': (25 / 255, 25 / 255, 112 / 255, 1),
'mintcream': (245 / 255, 255 / 255, 250 / 255, 1),
'mistyrose': (255 / 255, 228 / 255, 225 / 255, 1),
'moccasin': (255 / 255, 228 / 255, 181 / 255, 1),
'navajowhite': (255 / 255, 222 / 255, 173 / 255, 1),
'navy': (0 / 255, 0 / 255, 128 / 255, 1),
'oldlace': (253 / 255, 245 / 255, 230 / 255, 1),
'olive': (128 / 255, 128 / 255, 0 / 255, 1),
'olivedrab': (107 / 255, 142 / 255, 35 / 255, 1),
'orange': (255 / 255, 165 / 255, 0 / 255, 1),
'orangered': (255 / 255, 69 / 255, 0 / 255, 1),
'orchid': (218 / 255, 112 / 255, 214 / 255, 1),
'palegoldenrod': (238 / 255, 232 / 255, 170 / 255, 1),
'palegreen': (152 / 255, 251 / 255, 152 / 255, 1),
'paleturquoise': (175 / 255, 238 / 255, 238 / 255, 1),
'palevioletred': (219 / 255, 112 / 255, 147 / 255, 1),
'papayawhip': (255 / 255, 239 / 255, 213 / 255, 1),
'peachpuff': (255 / 255, 218 / 255, 185 / 255, 1),
'peru': (205 / 255, 133 / 255, 63 / 255, 1),
'pink': (255 / 255, 192 / 255, 203 / 255, 1),
'plum': (221 / 255, 160 / 255, 221 / 255, 1),
'powderblue': (176 / 255, 224 / 255, 230 / 255, 1),
'purple': (128 / 255, 0 / 255, 128 / 255, 1),
'red': (255 / 255, 0 / 255, 0 / 255, 1),
'rosybrown': (188 / 255, 143 / 255, 143 / 255, 1),
'royalblue': (65 / 255, 105 / 255, 225 / 255, 1),
'saddlebrown': (139 / 255, 69 / 255, 19 / 255, 1),
'salmon': (250 / 255, 128 / 255, 114 / 255, 1),
'sandybrown': (244 / 255, 164 / 255, 96 / 255, 1),
'seagreen': (46 / 255, 139 / 255, 87 / 255, 1),
'seashell': (255 / 255, 245 / 255, 238 / 255, 1),
'sienna': (160 / 255, 82 / 255, 45 / 255, 1),
'silver': (192 / 255, 192 / 255, 192 / 255, 1),
'skyblue': (135 / 255, 206 / 255, 235 / 255, 1),
'slateblue': (106 / 255, 90 / 255, 205 / 255, 1),
'slategray': (112 / 255, 128 / 255, 144 / 255, 1),
'slategrey': (112 / 255, 128 / 255, 144 / 255, 1),
'snow': (255 / 255, 250 / 255, 250 / 255, 1),
'springgreen': (0 / 255, 255 / 255, 127 / 255, 1),
'steelblue': (70 / 255, 130 / 255, 180 / 255, 1),
'tan': (210 / 255, 180 / 255, 140 / 255, 1),
'teal': (0 / 255, 128 / 255, 128 / 255, 1),
'thistle': (216 / 255, 191 / 255, 216 / 255, 1),
'tomato': (255 / 255, 99 / 255, 71 / 255, 1),
'turquoise': (64 / 255, 224 / 255, 208 / 255, 1),
'violet': (238 / 255, 130 / 255, 238 / 255, 1),
'wheat': (245 / 255, 222 / 255, 179 / 255, 1),
'white': (255 / 255, 255 / 255, 255 / 255, 1),
'whitesmoke': (245 / 255, 245 / 255, 245 / 255, 1),
'yellow': (255 / 255, 255 / 255, 0 / 255, 1),
'yellowgreen': (154 / 255, 205 / 255, 50 / 255, 1),
'activeborder': (0, 0, 1, 1),
'activecaption': (0, 0, 1, 1),
'appworkspace': (1, 1, 1, 1),
'background': (1, 1, 1, 1),
'buttonface': (0, 0, 0, 1),
'buttonhighlight': (0.8, 0.8, 0.8, 1),
'buttonshadow': (0.2, 0.2, 0.2, 1),
'buttontext': (0, 0, 0, 1),
'captiontext': (0, 0, 0, 1),
'graytext': (0.2, 0.2, 0.2, 1),
'highlight': (0, 0, 1, 1),
'highlighttext': (0.8, 0.8, 0.8, 1),
'inactiveborder': (0.2, 0.2, 0.2, 1),
'inactivecaption': (0.8, 0.8, 0.8, 1),
'inactivecaptiontext': (0.2, 0.2, 0.2, 1),
'infobackground': (0.8, 0.8, 0.8, 1),
'infotext': (0, 0, 0, 1),
'menu': (0.8, 0.8, 0.8, 1),
'menutext': (0.2, 0.2, 0.2, 1),
'scrollbar': (0.8, 0.8, 0.8, 1),
'threeddarkshadow': (0.2, 0.2, 0.2, 1),
'threedface': (0.8, 0.8, 0.8, 1),
'threedhighlight': (1, 1, 1, 1),
'threedlightshadow': (0.2, 0.2, 0.2, 1),
'threedshadow': (0.2, 0.2, 0.2, 1),
'window': (0.8, 0.8, 0.8, 1),
'windowframe': (0.8, 0.8, 0.8, 1),
'windowtext': (0, 0, 0, 1),
'none': (0, 0, 0, 0),
'transparent': (0, 0, 0, 0),
}
# Colour syntax matchers.  The component list is captured non-greedily
# between the literal parentheses.  The previous patterns wrapped the
# capture in `[ \n\r\t]*` on both sides; those quantifiers overlap with
# `.+?` and allow catastrophic backtracking on crafted input (ReDoS),
# so whitespace trimming is left to the caller (float() and the
# `.strip(' %')` in ``color`` tolerate surrounding whitespace).
RGBA = re.compile(r'rgba\((.+?)\)')
RGB = re.compile(r'rgb\((.+?)\)')
HEX_RRGGBB = re.compile('#[0-9a-f]{6}')
HEX_RGB = re.compile('#[0-9a-f]{3}')
def color(string, opacity=1):
    """Replace ``string`` representing a color by a RGBA tuple.

    See http://www.w3.org/TR/SVG/types.html#DataTypeColor
    Channel values are floats in [0, 1]; ``opacity`` multiplies the
    alpha channel.  Unknown syntax falls back to opaque black.
    """
    if not string:
        return (0, 0, 0, 0)
    string = string.strip().lower()
    if string in COLORS:
        r, g, b, a = COLORS[string]
        return (r, g, b, a * opacity)
    match = RGBA.search(string)
    if match:
        # Strip the captured component list explicitly instead of
        # relying on the regular expression to trim whitespace; this
        # keeps the patterns free of backtracking-prone quantifiers.
        r, g, b, a = tuple(
            float(i.strip(' %')) / 100 if '%' in i else float(i) / 255
            for i in match.group(1).strip().split(','))
        return (r, g, b, a * 255 * opacity)
    match = RGB.search(string)
    if match:
        r, g, b = tuple(
            float(i.strip(' %')) / 100 if '%' in i else float(i) / 255
            for i in match.group(1).strip().split(','))
        return (r, g, b, opacity)
    match = HEX_RRGGBB.search(string)
    if match:
        plain_color = tuple(
            int(value, 16) / 255 for value in (
                string[1:3], string[3:5], string[5:7]))
        return plain_color + (opacity,)
    match = HEX_RGB.search(string)
    if match:
        plain_color = tuple(
            int(value, 16) / 15 for value in (
                string[1], string[2], string[3]))
        return plain_color + (opacity,)
    return (0, 0, 0, 1)
def negate_color(rgba_tuple):
    """Return the complementary color of *rgba_tuple*, keeping its alpha."""
    red, green, blue, alpha = rgba_tuple
    return (1 - red, 1 - green, 1 - blue, alpha)
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_1886_0 |
crossvul-python_data_good_1886_0 | """
SVG colors.
"""
import re
COLORS = {
'aliceblue': (240 / 255, 248 / 255, 255 / 255, 1),
'antiquewhite': (250 / 255, 235 / 255, 215 / 255, 1),
'aqua': (0 / 255, 255 / 255, 255 / 255, 1),
'aquamarine': (127 / 255, 255 / 255, 212 / 255, 1),
'azure': (240 / 255, 255 / 255, 255 / 255, 1),
'beige': (245 / 255, 245 / 255, 220 / 255, 1),
'bisque': (255 / 255, 228 / 255, 196 / 255, 1),
'black': (0 / 255, 0 / 255, 0 / 255, 1),
'blanchedalmond': (255 / 255, 235 / 255, 205 / 255, 1),
'blue': (0 / 255, 0 / 255, 255 / 255, 1),
'blueviolet': (138 / 255, 43 / 255, 226 / 255, 1),
'brown': (165 / 255, 42 / 255, 42 / 255, 1),
'burlywood': (222 / 255, 184 / 255, 135 / 255, 1),
'cadetblue': (95 / 255, 158 / 255, 160 / 255, 1),
'chartreuse': (127 / 255, 255 / 255, 0 / 255, 1),
'chocolate': (210 / 255, 105 / 255, 30 / 255, 1),
'coral': (255 / 255, 127 / 255, 80 / 255, 1),
'cornflowerblue': (100 / 255, 149 / 255, 237 / 255, 1),
'cornsilk': (255 / 255, 248 / 255, 220 / 255, 1),
'crimson': (220 / 255, 20 / 255, 60 / 255, 1),
'cyan': (0 / 255, 255 / 255, 255 / 255, 1),
'darkblue': (0 / 255, 0 / 255, 139 / 255, 1),
'darkcyan': (0 / 255, 139 / 255, 139 / 255, 1),
'darkgoldenrod': (184 / 255, 134 / 255, 11 / 255, 1),
'darkgray': (169 / 255, 169 / 255, 169 / 255, 1),
'darkgreen': (0 / 255, 100 / 255, 0 / 255, 1),
'darkgrey': (169 / 255, 169 / 255, 169 / 255, 1),
'darkkhaki': (189 / 255, 183 / 255, 107 / 255, 1),
'darkmagenta': (139 / 255, 0 / 255, 139 / 255, 1),
'darkolivegreen': (85 / 255, 107 / 255, 47 / 255, 1),
'darkorange': (255 / 255, 140 / 255, 0 / 255, 1),
'darkorchid': (153 / 255, 50 / 255, 204 / 255, 1),
'darkred': (139 / 255, 0 / 255, 0 / 255, 1),
'darksalmon': (233 / 255, 150 / 255, 122 / 255, 1),
'darkseagreen': (143 / 255, 188 / 255, 143 / 255, 1),
'darkslateblue': (72 / 255, 61 / 255, 139 / 255, 1),
'darkslategray': (47 / 255, 79 / 255, 79 / 255, 1),
'darkslategrey': (47 / 255, 79 / 255, 79 / 255, 1),
'darkturquoise': (0 / 255, 206 / 255, 209 / 255, 1),
'darkviolet': (148 / 255, 0 / 255, 211 / 255, 1),
'deeppink': (255 / 255, 20 / 255, 147 / 255, 1),
'deepskyblue': (0 / 255, 191 / 255, 255 / 255, 1),
'dimgray': (105 / 255, 105 / 255, 105 / 255, 1),
'dimgrey': (105 / 255, 105 / 255, 105 / 255, 1),
'dodgerblue': (30 / 255, 144 / 255, 255 / 255, 1),
'firebrick': (178 / 255, 34 / 255, 34 / 255, 1),
'floralwhite': (255 / 255, 250 / 255, 240 / 255, 1),
'forestgreen': (34 / 255, 139 / 255, 34 / 255, 1),
'fuchsia': (255 / 255, 0 / 255, 255 / 255, 1),
'gainsboro': (220 / 255, 220 / 255, 220 / 255, 1),
'ghostwhite': (248 / 255, 248 / 255, 255 / 255, 1),
'gold': (255 / 255, 215 / 255, 0 / 255, 1),
'goldenrod': (218 / 255, 165 / 255, 32 / 255, 1),
'gray': (128 / 255, 128 / 255, 128 / 255, 1),
'grey': (128 / 255, 128 / 255, 128 / 255, 1),
'green': (0 / 255, 128 / 255, 0 / 255, 1),
'greenyellow': (173 / 255, 255 / 255, 47 / 255, 1),
'honeydew': (240 / 255, 255 / 255, 240 / 255, 1),
'hotpink': (255 / 255, 105 / 255, 180 / 255, 1),
'indianred': (205 / 255, 92 / 255, 92 / 255, 1),
'indigo': (75 / 255, 0 / 255, 130 / 255, 1),
'ivory': (255 / 255, 255 / 255, 240 / 255, 1),
'khaki': (240 / 255, 230 / 255, 140 / 255, 1),
'lavender': (230 / 255, 230 / 255, 250 / 255, 1),
'lavenderblush': (255 / 255, 240 / 255, 245 / 255, 1),
'lawngreen': (124 / 255, 252 / 255, 0 / 255, 1),
'lemonchiffon': (255 / 255, 250 / 255, 205 / 255, 1),
'lightblue': (173 / 255, 216 / 255, 230 / 255, 1),
'lightcoral': (240 / 255, 128 / 255, 128 / 255, 1),
'lightcyan': (224 / 255, 255 / 255, 255 / 255, 1),
'lightgoldenrodyellow': (250 / 255, 250 / 255, 210 / 255, 1),
'lightgray': (211 / 255, 211 / 255, 211 / 255, 1),
'lightgreen': (144 / 255, 238 / 255, 144 / 255, 1),
'lightgrey': (211 / 255, 211 / 255, 211 / 255, 1),
'lightpink': (255 / 255, 182 / 255, 193 / 255, 1),
'lightsalmon': (255 / 255, 160 / 255, 122 / 255, 1),
'lightseagreen': (32 / 255, 178 / 255, 170 / 255, 1),
'lightskyblue': (135 / 255, 206 / 255, 250 / 255, 1),
'lightslategray': (119 / 255, 136 / 255, 153 / 255, 1),
'lightslategrey': (119 / 255, 136 / 255, 153 / 255, 1),
'lightsteelblue': (176 / 255, 196 / 255, 222 / 255, 1),
'lightyellow': (255 / 255, 255 / 255, 224 / 255, 1),
'lime': (0 / 255, 255 / 255, 0 / 255, 1),
'limegreen': (50 / 255, 205 / 255, 50 / 255, 1),
'linen': (250 / 255, 240 / 255, 230 / 255, 1),
'magenta': (255 / 255, 0 / 255, 255 / 255, 1),
'maroon': (128 / 255, 0 / 255, 0 / 255, 1),
'mediumaquamarine': (102 / 255, 205 / 255, 170 / 255, 1),
'mediumblue': (0 / 255, 0 / 255, 205 / 255, 1),
'mediumorchid': (186 / 255, 85 / 255, 211 / 255, 1),
'mediumpurple': (147 / 255, 112 / 255, 219 / 255, 1),
'mediumseagreen': (60 / 255, 179 / 255, 113 / 255, 1),
'mediumslateblue': (123 / 255, 104 / 255, 238 / 255, 1),
'mediumspringgreen': (0 / 255, 250 / 255, 154 / 255, 1),
'mediumturquoise': (72 / 255, 209 / 255, 204 / 255, 1),
'mediumvioletred': (199 / 255, 21 / 255, 133 / 255, 1),
'midnightblue': (25 / 255, 25 / 255, 112 / 255, 1),
'mintcream': (245 / 255, 255 / 255, 250 / 255, 1),
'mistyrose': (255 / 255, 228 / 255, 225 / 255, 1),
'moccasin': (255 / 255, 228 / 255, 181 / 255, 1),
'navajowhite': (255 / 255, 222 / 255, 173 / 255, 1),
'navy': (0 / 255, 0 / 255, 128 / 255, 1),
'oldlace': (253 / 255, 245 / 255, 230 / 255, 1),
'olive': (128 / 255, 128 / 255, 0 / 255, 1),
'olivedrab': (107 / 255, 142 / 255, 35 / 255, 1),
'orange': (255 / 255, 165 / 255, 0 / 255, 1),
'orangered': (255 / 255, 69 / 255, 0 / 255, 1),
'orchid': (218 / 255, 112 / 255, 214 / 255, 1),
'palegoldenrod': (238 / 255, 232 / 255, 170 / 255, 1),
'palegreen': (152 / 255, 251 / 255, 152 / 255, 1),
'paleturquoise': (175 / 255, 238 / 255, 238 / 255, 1),
'palevioletred': (219 / 255, 112 / 255, 147 / 255, 1),
'papayawhip': (255 / 255, 239 / 255, 213 / 255, 1),
'peachpuff': (255 / 255, 218 / 255, 185 / 255, 1),
'peru': (205 / 255, 133 / 255, 63 / 255, 1),
'pink': (255 / 255, 192 / 255, 203 / 255, 1),
'plum': (221 / 255, 160 / 255, 221 / 255, 1),
'powderblue': (176 / 255, 224 / 255, 230 / 255, 1),
'purple': (128 / 255, 0 / 255, 128 / 255, 1),
'red': (255 / 255, 0 / 255, 0 / 255, 1),
'rosybrown': (188 / 255, 143 / 255, 143 / 255, 1),
'royalblue': (65 / 255, 105 / 255, 225 / 255, 1),
'saddlebrown': (139 / 255, 69 / 255, 19 / 255, 1),
'salmon': (250 / 255, 128 / 255, 114 / 255, 1),
'sandybrown': (244 / 255, 164 / 255, 96 / 255, 1),
'seagreen': (46 / 255, 139 / 255, 87 / 255, 1),
'seashell': (255 / 255, 245 / 255, 238 / 255, 1),
'sienna': (160 / 255, 82 / 255, 45 / 255, 1),
'silver': (192 / 255, 192 / 255, 192 / 255, 1),
'skyblue': (135 / 255, 206 / 255, 235 / 255, 1),
'slateblue': (106 / 255, 90 / 255, 205 / 255, 1),
'slategray': (112 / 255, 128 / 255, 144 / 255, 1),
'slategrey': (112 / 255, 128 / 255, 144 / 255, 1),
'snow': (255 / 255, 250 / 255, 250 / 255, 1),
'springgreen': (0 / 255, 255 / 255, 127 / 255, 1),
'steelblue': (70 / 255, 130 / 255, 180 / 255, 1),
'tan': (210 / 255, 180 / 255, 140 / 255, 1),
'teal': (0 / 255, 128 / 255, 128 / 255, 1),
'thistle': (216 / 255, 191 / 255, 216 / 255, 1),
'tomato': (255 / 255, 99 / 255, 71 / 255, 1),
'turquoise': (64 / 255, 224 / 255, 208 / 255, 1),
'violet': (238 / 255, 130 / 255, 238 / 255, 1),
'wheat': (245 / 255, 222 / 255, 179 / 255, 1),
'white': (255 / 255, 255 / 255, 255 / 255, 1),
'whitesmoke': (245 / 255, 245 / 255, 245 / 255, 1),
'yellow': (255 / 255, 255 / 255, 0 / 255, 1),
'yellowgreen': (154 / 255, 205 / 255, 50 / 255, 1),
'activeborder': (0, 0, 1, 1),
'activecaption': (0, 0, 1, 1),
'appworkspace': (1, 1, 1, 1),
'background': (1, 1, 1, 1),
'buttonface': (0, 0, 0, 1),
'buttonhighlight': (0.8, 0.8, 0.8, 1),
'buttonshadow': (0.2, 0.2, 0.2, 1),
'buttontext': (0, 0, 0, 1),
'captiontext': (0, 0, 0, 1),
'graytext': (0.2, 0.2, 0.2, 1),
'highlight': (0, 0, 1, 1),
'highlighttext': (0.8, 0.8, 0.8, 1),
'inactiveborder': (0.2, 0.2, 0.2, 1),
'inactivecaption': (0.8, 0.8, 0.8, 1),
'inactivecaptiontext': (0.2, 0.2, 0.2, 1),
'infobackground': (0.8, 0.8, 0.8, 1),
'infotext': (0, 0, 0, 1),
'menu': (0.8, 0.8, 0.8, 1),
'menutext': (0.2, 0.2, 0.2, 1),
'scrollbar': (0.8, 0.8, 0.8, 1),
'threeddarkshadow': (0.2, 0.2, 0.2, 1),
'threedface': (0.8, 0.8, 0.8, 1),
'threedhighlight': (1, 1, 1, 1),
'threedlightshadow': (0.2, 0.2, 0.2, 1),
'threedshadow': (0.2, 0.2, 0.2, 1),
'window': (0.8, 0.8, 0.8, 1),
'windowframe': (0.8, 0.8, 0.8, 1),
'windowtext': (0, 0, 0, 1),
'none': (0, 0, 0, 0),
'transparent': (0, 0, 0, 0),
}
# Colour syntax matchers.  The component list is captured non-greedily
# between the literal parentheses; surrounding whitespace is handled by
# the caller (``color`` strips the captured group), which keeps these
# patterns free of overlapping quantifiers that could backtrack badly.
RGBA = re.compile(r'rgba\((.+?)\)')
RGB = re.compile(r'rgb\((.+?)\)')
HEX_RRGGBB = re.compile('#[0-9a-f]{6}')
HEX_RGB = re.compile('#[0-9a-f]{3}')
def color(string, opacity=1):
    """Turn the SVG color ``string`` into an (r, g, b, a) tuple.

    See http://www.w3.org/TR/SVG/types.html#DataTypeColor
    Channel values are floats in [0, 1]; ``opacity`` multiplies the
    alpha channel.  Unrecognised syntax yields opaque black.
    """
    if not string:
        return (0, 0, 0, 0)
    string = string.strip().lower()

    if string in COLORS:
        r, g, b, a = COLORS[string]
        return (r, g, b, a * opacity)

    def component(token):
        # Percentages are fractions of 100, plain numbers fractions of 255.
        if '%' in token:
            return float(token.strip(' %')) / 100
        return float(token) / 255

    rgba_match = RGBA.search(string)
    if rgba_match:
        r, g, b, a = (
            component(token)
            for token in rgba_match.group(1).strip().split(','))
        return (r, g, b, a * 255 * opacity)

    rgb_match = RGB.search(string)
    if rgb_match:
        r, g, b = (
            component(token)
            for token in rgb_match.group(1).strip().split(','))
        return (r, g, b, opacity)

    if HEX_RRGGBB.search(string):
        r, g, b = (
            int(string[index:index + 2], 16) / 255 for index in (1, 3, 5))
        return (r, g, b, opacity)

    if HEX_RGB.search(string):
        r, g, b = (int(digit, 16) / 15 for digit in string[1:4])
        return (r, g, b, opacity)

    return (0, 0, 0, 1)
def negate_color(rgba_tuple):
    """Invert the RGB channels of *rgba_tuple*; alpha is unchanged."""
    *channels, alpha = rgba_tuple
    return tuple(1 - channel for channel in channels) + (alpha,)
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_1886_0 |
crossvul-python_data_bad_1890_0 | """Small, fast HTTP client library for Python.
Features persistent connections, cache, and Google App Engine Standard
Environment support.
"""
from __future__ import print_function
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = [
"Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger",
"Alex Yu",
]
__license__ = "MIT"
__version__ = "0.18.1"
import base64
import calendar
import copy
import email
import email.FeedParser
import email.Message
import email.Utils
import errno
import gzip
import httplib
import os
import random
import re
import StringIO
import sys
import time
import urllib
import urlparse
import zlib
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except (ImportError, AttributeError):
socks = None
# Build the appropriate socket wrapper for ssl
ssl = None
ssl_SSLError = None
ssl_CertificateError = None
try:
import ssl # python 2.6
except ImportError:
pass
if ssl is not None:
ssl_SSLError = getattr(ssl, "SSLError", None)
ssl_CertificateError = getattr(ssl, "CertificateError", None)
def _ssl_wrap_socket(
    sock, key_file, cert_file, disable_validation, ca_certs, ssl_version, hostname, key_password
):
    """Wrap *sock* in TLS and return the wrapped socket.

    Uses an ``ssl.SSLContext`` when available (Python 2.7.9+), otherwise
    falls back to the module-level ``ssl.wrap_socket``.  When
    ``disable_validation`` is true, certificate and hostname checks are
    switched off entirely, leaving the connection open to
    man-in-the-middle attacks.
    """
    if disable_validation:
        cert_reqs = ssl.CERT_NONE
    else:
        cert_reqs = ssl.CERT_REQUIRED
    if ssl_version is None:
        # Despite its name, PROTOCOL_SSLv23 negotiates the highest
        # protocol version both sides support.
        ssl_version = ssl.PROTOCOL_SSLv23
    if hasattr(ssl, "SSLContext"):  # SSLContext appeared in Python 2.7.9
        context = ssl.SSLContext(ssl_version)
        context.verify_mode = cert_reqs
        # Hostname checking is only meaningful when certificates are verified.
        context.check_hostname = cert_reqs != ssl.CERT_NONE
        if cert_file:
            if key_password:
                context.load_cert_chain(cert_file, key_file, key_password)
            else:
                context.load_cert_chain(cert_file, key_file)
        if ca_certs:
            context.load_verify_locations(ca_certs)
        # server_hostname enables SNI (and hostname verification above).
        return context.wrap_socket(sock, server_hostname=hostname)
    else:
        if key_password:
            # The pre-SSLContext API cannot load password-protected keys.
            raise NotSupportedOnThisPlatform("Certificate with password is not supported.")
        return ssl.wrap_socket(
            sock,
            keyfile=key_file,
            certfile=cert_file,
            cert_reqs=cert_reqs,
            ca_certs=ca_certs,
            ssl_version=ssl_version,
        )
def _ssl_wrap_socket_unsupported(
    sock, key_file, cert_file, disable_validation, ca_certs, ssl_version, hostname, key_password
):
    """Fallback used when this Python build lacks the ``ssl`` module.

    Only usable with validation explicitly disabled: the legacy
    ``socket.ssl`` API performs no certificate checking at all, so we
    refuse to pretend otherwise.  ``ca_certs``, ``ssl_version`` and
    ``hostname`` are accepted for signature compatibility but unused.
    """
    if not disable_validation:
        raise CertificateValidationUnsupported(
            "SSL certificate validation is not supported without "
            "the ssl module installed. To avoid this error, install "
            "the ssl module, or explicity disable validation."
        )
    if key_password:
        raise NotSupportedOnThisPlatform("Certificate with password is not supported.")
    ssl_sock = socket.ssl(sock, key_file, cert_file)
    return httplib.FakeSocket(sock, ssl_sock)
if ssl is None:
_ssl_wrap_socket = _ssl_wrap_socket_unsupported
if sys.version_info >= (2, 3):
from .iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout):
    """Return True when *timeout* is an explicit timeout value.

    ``socket._GLOBAL_DEFAULT_TIMEOUT`` (Python 2.6+) counts as "no
    explicit timeout", exactly like ``None``.
    """
    if timeout is None:
        return False
    return timeout is not getattr(socket, "_GLOBAL_DEFAULT_TIMEOUT", None)
__all__ = [
"Http",
"Response",
"ProxyInfo",
"HttpLib2Error",
"RedirectMissingLocation",
"RedirectLimit",
"FailedToDecompressContent",
"UnimplementedDigestAuthOptionError",
"UnimplementedHmacDigestAuthOptionError",
"debuglevel",
"ProxiesUnavailableError",
]
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
# Python 2.3 support
if sys.version_info < (2, 4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples.

    Backport of ``HTTPResponse.getheaders`` for Python 2.3, built on
    the parsed ``self.msg`` message object.
    """
    if self.msg is None:
        # Headers are only available once the response has been begun.
        raise httplib.ResponseNotReady()
    return self.msg.items()
# Python 2.3's httplib.HTTPResponse lacks getheaders(); patch it in.
if not hasattr(httplib.HTTPResponse, "getheaders"):
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    """Error that carries the offending response and content.

    Lets callers recover the (response, content) pair that triggered
    the failure instead of losing it with the exception.
    """
    def __init__(self, desc, response, content):
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse):
    """A redirect response did not include a Location header."""
    pass
class RedirectLimit(HttpLib2ErrorWithResponse):
    """Too many redirects were followed for a single request."""
    pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse):
    """The response body could not be decompressed."""
    pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """The server requested an unsupported Digest auth option."""
    pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """The server requested an unsupported HMACDigest auth option."""
    pass
class MalformedHeader(HttpLib2Error):
    """A response header could not be parsed."""
    pass
class RelativeURIError(HttpLib2Error):
    """A relative URI was given where an absolute one is required."""
    pass
class ServerNotFoundError(HttpLib2Error):
    """The server's hostname could not be resolved."""
    pass
class ProxiesUnavailableError(HttpLib2Error):
    """Proxy support was requested but is not available."""
    pass
class CertificateValidationUnsupported(HttpLib2Error):
    """Certificate validation was requested on a platform without ssl."""
    pass
class SSLHandshakeError(HttpLib2Error):
    """The TLS handshake with the server failed."""
    pass
class NotSupportedOnThisPlatform(HttpLib2Error):
    """The requested feature is unavailable on this platform."""
    pass
class CertificateHostnameMismatch(SSLHandshakeError):
    """The server certificate does not match the requested hostname."""
    def __init__(self, desc, host, cert):
        HttpLib2Error.__init__(self, desc)
        self.host = host
        self.cert = cert
class NotRunningAppEngineEnvironment(HttpLib2Error):
    """App Engine support was used outside an App Engine environment."""
    pass
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
from httplib2 import certs
CA_CERTS = certs.where()
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = [
"connection",
"keep-alive",
"proxy-authenticate",
"proxy-authorization",
"te",
"trailers",
"transfer-encoding",
"upgrade",
]
# https://tools.ietf.org/html/rfc7231#section-8.1.3
SAFE_METHODS = ("GET", "HEAD") # TODO add "OPTIONS", "TRACE"
# To change, assign to `Http().redirect_codes`
REDIRECT_CODES = frozenset((300, 301, 302, 303, 307, 308))
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get("connection", "").split(",")])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")

def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    Missing components come back as None.
    """
    (_, scheme, _, authority, path,
     _, query, _, fragment) = URI.match(uri).groups()
    return (scheme, authority, path, query, fragment)
def urlnorm(uri):
    """Normalize *uri* for cache-key purposes.

    Returns (scheme, authority, request_uri, defrag_uri) with scheme
    and authority lower-cased, an empty path replaced by "/", and the
    fragment dropped.  Raises RelativeURIError for non-absolute URIs.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    # Lower-case exactly once each (the scheme was previously lowered twice).
    scheme = scheme.lower()
    authority = authority.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    if query:
        request_uri = "?".join([path, query])
    else:
        request_uri = path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r"^\w+://")
re_unsafe = re.compile(r"[^\w\-_.()=!]+")
def safename(filename):
    """Return a filename suitable for the cache.

    Strips dangerous and common characters to create a filename we
    can use to store the cache in.  The result is "<sanitized>,<md5>";
    the MD5 digest is a cache-key fingerprint, not a security measure.
    """
    # Python 2 semantics: str is bytes, so decode it; unicode gets encoded
    # to produce the bytes that feed the digest.
    if isinstance(filename, str):
        filename_bytes = filename
        filename = filename.decode("utf-8")
    else:
        filename_bytes = filename.encode("utf-8")
    filemd5 = _md5(filename_bytes).hexdigest()
    # Drop the scheme and any character outside the safe set.
    filename = re_url_scheme.sub("", filename)
    filename = re_unsafe.sub("", filename)
    # limit length of filename (vital for Windows)
    # https://github.com/httplib2/httplib2/pull/74
    # C:\Users\ <username> \AppData\Local\Temp\ <safe_filename> , <md5>
    # 9 chars + max 104 chars + 20 chars + x + 1 + 32 = max 259 chars
    # Thus max safe filename x = 93 chars. Let it be 90 to make a round sum:
    filename = filename[:90]
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r"(?:\r\n)?[ \t]+")
def _normalize_headers(headers):
return dict(
[
(key.lower(), NORMALIZE_SPACE.sub(value, " ").strip())
for (key, value) in headers.iteritems()
]
)
def _parse_cache_control(headers):
retval = {}
if "cache-control" in headers:
parts = headers["cache-control"].split(",")
parts_with_args = [
tuple([x.strip().lower() for x in part.split("=", 1)])
for part in parts
if -1 != part.find("=")
]
parts_wo_args = [
(name.strip().lower(), 1) for name in parts if -1 == name.find("=")
]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, usefull for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(
r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$"
)
WWW_AUTH_RELAXED = re.compile(
r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$"
)
UNQUOTE_PAIRS = re.compile(r"\\(.)")
def _parse_www_authenticate(headers, headername="www-authenticate"):
    """Parse a WWW-Authenticate style header from *headers*.

    Returns a dict mapping each auth scheme (lower-cased) to a dict of
    its parameters.  Raises MalformedHeader when a scheme token cannot
    be split off the header value.
    """
    retval = {}
    if headername in headers:
        try:
            authenticate = headers[headername].strip()
            # Strict vs. relaxed auth-param grammar; relaxed is the default.
            www_auth = (
                USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
            )
            while authenticate:
                # Break off the scheme at the beginning of the line
                if headername == "authentication-info":
                    # Authentication-Info carries no scheme token; this
                    # code assumes Digest.
                    (auth_scheme, the_rest) = ("digest", authenticate)
                else:
                    (auth_scheme, the_rest) = authenticate.split(" ", 1)
                # Now loop over all the key value pairs that come after the scheme,
                # being careful not to roll into the next scheme
                match = www_auth.search(the_rest)
                auth_params = {}
                while match:
                    if match and len(match.groups()) == 3:
                        (key, value, the_rest) = match.groups()
                        # Undo backslash escapes inside quoted-string values.
                        auth_params[key.lower()] = UNQUOTE_PAIRS.sub(
                            r"\1", value
                        )
                    match = www_auth.search(the_rest)
                retval[auth_scheme.lower()] = auth_params
                authenticate = the_rest.strip()
        except ValueError:
            # split(" ", 1) fails when a scheme has no parameters at all.
            raise MalformedHeader("WWW-Authenticate")
    return retval
# TODO: add current time as _entry_disposition argument to avoid sleep in tests
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    Returns one of "FRESH", "STALE" or "TRANSPARENT".

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Not that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh
    """
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if (
        "pragma" in request_headers
        and request_headers["pragma"].lower().find("no-cache") != -1
    ):
        # Legacy HTTP/1.0 Pragma: no-cache — bypass the cache entirely and
        # also force Cache-Control: no-cache onto the outgoing request.
        retval = "TRANSPARENT"
        if "cache-control" not in request_headers:
            request_headers["cache-control"] = "no-cache"
    elif "no-cache" in cc:
        retval = "TRANSPARENT"
    elif "no-cache" in cc_response:
        retval = "STALE"
    elif "only-if-cached" in cc:
        retval = "FRESH"
    elif "date" in response_headers:
        # NOTE: email.Utils is the Python 2 spelling (email.utils on Py3).
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers["date"]))
        now = time.time()
        current_age = max(0, now - date)
        if "max-age" in cc_response:
            try:
                freshness_lifetime = int(cc_response["max-age"])
            except ValueError:
                freshness_lifetime = 0
        elif "expires" in response_headers:
            expires = email.Utils.parsedate_tz(response_headers["expires"])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # The request's own max-age / min-fresh constraints take precedence
        # over what the response advertised.
        if "max-age" in cc:
            try:
                freshness_lifetime = int(cc["max-age"])
            except ValueError:
                freshness_lifetime = 0
        if "min-fresh" in cc:
            try:
                min_fresh = int(cc["min-fresh"])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
    """Undo a gzip/deflate Content-Encoding and return the decoded body.

    On success, content-length is corrected and the content-encoding header
    is renamed to -content-encoding so the original encoding is still on
    record.  Raises FailedToDecompressContent when the body is corrupt.
    """
    content = new_content
    encoding = response.get("content-encoding", None)
    if encoding not in ("gzip", "deflate"):
        # Nothing to do for identity (or unknown) encodings.
        return content
    try:
        if encoding == "gzip":
            content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
        else:  # deflate
            content = zlib.decompress(content, -zlib.MAX_WBITS)
        response["content-length"] = str(len(content))
        # Record the historical presence of the encoding in a way the won't interfere.
        response["-content-encoding"] = response["content-encoding"]
        del response["content-encoding"]
    except (IOError, zlib.error):
        content = ""
        raise FailedToDecompressContent(
            _("Content purported to be compressed with %s but failed to decompress.")
            % response.get("content-encoding"),
            response,
            content,
        )
    return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or purge) a response under cachekey, honouring no-store."""
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if "no-store" in cc or "no-store" in cc_response:
            # Either side forbids storing; drop any stale entry as well.
            cache.delete(cachekey)
        else:
            # Serialize the headers through an email.Message (Python 2
            # spelling), excluding status and encoding-related headers.
            info = email.Message.Message()
            for key, value in response_headers.iteritems():
                if key not in ["status", "content-encoding", "transfer-encoding"]:
                    info[key] = value

            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get("vary", None)
            if vary:
                vary_headers = vary.lower().replace(" ", "").split(",")
                for header in vary_headers:
                    key = "-varied-%s" % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass

            # NOTE(review): response_headers appears to be an httplib2
            # Response-like object exposing .status — confirm with callers.
            status = response_headers.status
            if status == 304:
                # A 304 revalidation refreshes the stored 200 entry.
                status = 200

            status_header = "status: %d\r\n" % status

            header_str = info.as_string()

            # Normalize bare CR or LF to CRLF before caching.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])

            cache.set(cachekey, text)
def _cnonce():
dig = _md5(
"%s:%s"
% (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])
).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(
_sha("%s%s%s" % (cnonce, iso_now, password)).digest()
).strip()
# For credentials we need two things, first
# a pool of credential to try (not necesarily tied to BAsic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.


class Authentication(object):
    """Base class for per-scheme HTTP authentication handlers.

    Records the credentials and the (host, path) prefix that the server's
    challenge applies to; concrete schemes override request()/response().
    """

    def __init__(
        self, credentials, host, request_uri, headers, response, content, http
    ):
        # Only the path component of the challenged URI is retained; the
        # 5-way unpack also validates the parse result's shape.
        _, _, path, _, _ = parse_uri(request_uri)
        self.path = path
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        """How many levels below our root path this URI sits."""
        # Parse first (preserves the original's validation side effect).
        _, _, _, _, _ = parse_uri(request_uri)
        return request_uri[len(self.path) :].count("/")

    def inscope(self, host, request_uri):
        """True when this handler's auth realm covers the given URI."""
        # XXX Should we normalize the request_uri?
        _, _, path, _, _ = parse_uri(request_uri)
        if host != self.host:
            return False
        return path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Over-ride this in sub-classes."""
        pass

    def response(self, response, content):
        """Gives us a chance to update with new nonces
        or such returned from the last authorized response.
        Over-rise this in sub-classes if necessary.

        Return TRUE is the request is to be retried, for
        example Digest may return stale=true.
        """
        return False
class BasicAuthentication(Authentication):
    """RFC 2617 Basic authentication: 'username:password', Base64-encoded."""

    def __init__(
        self, credentials, host, request_uri, headers, response, content, http
    ):
        Authentication.__init__(
            self, credentials, host, request_uri, headers, response, content, http
        )

    def request(self, method, request_uri, headers, content):
        """Attach a Basic Authorization header built from the stored credentials."""
        userpass = "%s:%s" % self.credentials
        headers["authorization"] = "Basic " + base64.b64encode(userpass).strip()
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""

    def __init__(
        self, credentials, host, request_uri, headers, response, content, http
    ):
        Authentication.__init__(
            self, credentials, host, request_uri, headers, response, content, http
        )
        # Parse the server challenge; only the "digest" scheme params matter.
        challenge = _parse_www_authenticate(response, "www-authenticate")
        self.challenge = challenge["digest"]
        qop = self.challenge.get("qop", "auth")
        # Keep "auth" if the server offers it; auth-int is unsupported.
        self.challenge["qop"] = (
            ("auth" in [x.strip() for x in qop.split()]) and "auth" or None
        )
        if self.challenge["qop"] is None:
            raise UnimplementedDigestAuthOptionError(
                _("Unsupported value for qop: %s." % qop)
            )
        self.challenge["algorithm"] = self.challenge.get("algorithm", "MD5").upper()
        if self.challenge["algorithm"] != "MD5":
            raise UnimplementedDigestAuthOptionError(
                _("Unsupported value for algorithm: %s." % self.challenge["algorithm"])
            )
        # A1 = username:realm:password (RFC 2617, section 3.2.2.2).
        self.A1 = "".join(
            [
                self.credentials[0],
                ":",
                self.challenge["realm"],
                ":",
                self.credentials[1],
            ]
        )
        # Nonce-use count; incremented after every request we sign.
        self.challenge["nc"] = 1

    def request(self, method, request_uri, headers, content, cnonce=None):
        """Modify the request headers"""
        # H and KD as defined by RFC 2617; A2 = method:digest-uri.
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        self.challenge["cnonce"] = cnonce or _cnonce()
        request_digest = '"%s"' % KD(
            H(self.A1),
            "%s:%s:%s:%s:%s"
            % (
                self.challenge["nonce"],
                "%08x" % self.challenge["nc"],
                self.challenge["cnonce"],
                self.challenge["qop"],
                H(A2),
            ),
        )
        headers["authorization"] = (
            'Digest username="%s", realm="%s", nonce="%s", '
            'uri="%s", algorithm=%s, response=%s, qop=%s, '
            'nc=%08x, cnonce="%s"'
        ) % (
            self.credentials[0],
            self.challenge["realm"],
            self.challenge["nonce"],
            request_uri,
            self.challenge["algorithm"],
            request_digest,
            self.challenge["qop"],
            self.challenge["nc"],
            self.challenge["cnonce"],
        )
        if self.challenge.get("opaque"):
            # The server's opaque data must be echoed back unchanged.
            headers["authorization"] += ', opaque="%s"' % self.challenge["opaque"]
        self.challenge["nc"] += 1

    def response(self, response, content):
        """Return True when the request should be retried (stale nonce)."""
        if "authentication-info" not in response:
            challenge = _parse_www_authenticate(response, "www-authenticate").get(
                "digest", {}
            )
            if "true" == challenge.get("stale"):
                # Stale nonce: adopt the fresh one and reset the counter.
                self.challenge["nonce"] = challenge["nonce"]
                self.challenge["nc"] = 1
                return True
        else:
            updated_challenge = _parse_www_authenticate(
                response, "authentication-info"
            ).get("digest", {})

            if "nextnonce" in updated_challenge:
                self.challenge["nonce"] = updated_challenge["nextnonce"]
                self.challenge["nc"] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""

    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(
        self, credentials, host, request_uri, headers, response, content, http
    ):
        Authentication.__init__(
            self, credentials, host, request_uri, headers, response, content, http
        )
        challenge = _parse_www_authenticate(response, "www-authenticate")
        self.challenge = challenge["hmacdigest"]
        # TODO: self.challenge['domain']
        self.challenge["reason"] = self.challenge.get("reason", "unauthorized")
        if self.challenge["reason"] not in ["unauthorized", "integrity"]:
            self.challenge["reason"] = "unauthorized"
        self.challenge["salt"] = self.challenge.get("salt", "")
        if not self.challenge.get("snonce"):
            raise UnimplementedHmacDigestAuthOptionError(
                _("The challenge doesn't contain a server nonce, or this one is empty.")
            )
        self.challenge["algorithm"] = self.challenge.get("algorithm", "HMAC-SHA-1")
        if self.challenge["algorithm"] not in ["HMAC-SHA-1", "HMAC-MD5"]:
            raise UnimplementedHmacDigestAuthOptionError(
                _("Unsupported value for algorithm: %s." % self.challenge["algorithm"])
            )
        self.challenge["pw-algorithm"] = self.challenge.get("pw-algorithm", "SHA-1")
        if self.challenge["pw-algorithm"] not in ["SHA-1", "MD5"]:
            raise UnimplementedHmacDigestAuthOptionError(
                _(
                    "Unsupported value for pw-algorithm: %s."
                    % self.challenge["pw-algorithm"]
                )
            )
        # Pick hash modules for the request digest and for the password hash.
        if self.challenge["algorithm"] == "HMAC-MD5":
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge["pw-algorithm"] == "MD5":
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # key = pwhash(username ":" pwhash(password + salt) ":" realm),
        # hex-encoded and lower-cased at each stage.
        self.key = "".join(
            [
                self.credentials[0],
                ":",
                self.pwhashmod.new(
                    "".join([self.credentials[1], self.challenge["salt"]])
                )
                .hexdigest()
                .lower(),
                ":",
                self.challenge["realm"],
            ]
        )
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        # The digest covers the end-to-end headers plus method/URI/nonces.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (
            method,
            request_uri,
            cnonce,
            self.challenge["snonce"],
            headers_val,
        )
        request_digest = (
            hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        )
        headers["authorization"] = (
            'HMACDigest username="%s", realm="%s", snonce="%s",'
            ' cnonce="%s", uri="%s", created="%s", '
            'response="%s", headers="%s"'
        ) % (
            self.credentials[0],
            self.challenge["realm"],
            self.challenge["snonce"],
            cnonce,
            request_uri,
            created,
            request_digest,
            keylist,
        )

    def response(self, response, content):
        """Return True to retry when the server reports integrity/stale failure."""
        challenge = _parse_www_authenticate(response, "www-authenticate").get(
            "hmacdigest", {}
        )
        if challenge.get("reason") in ["integrity", "stale"]:
            return True
        return False
class WsseAuthentication(Authentication):
    """WSSE UsernameToken authentication (thinly tested).

    There is no known third-party server to verify against: Blogger and
    TypePad once implemented this profile, but Blogger switched to Basic
    over HTTPS and TypePad's variant never issues a 401 challenge,
    expecting the client to telepathically know that the endpoint wants
    WSSE profile="UsernameToken".
    """

    def __init__(
        self, credentials, host, request_uri, headers, response, content, http
    ):
        Authentication.__init__(
            self, credentials, host, request_uri, headers, response, content, http
        )

    def request(self, method, request_uri, headers, content):
        """Attach the WSSE Authorization and X-WSSE headers to the request."""
        headers["authorization"] = 'WSSE profile="UsernameToken"'
        timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        nonce = _cnonce()
        digest = _wsse_username_token(nonce, timestamp, self.credentials[1])
        token_fmt = (
            'UsernameToken Username="%s", PasswordDigest="%s", '
            'Nonce="%s", Created="%s"'
        )
        headers["X-WSSE"] = token_fmt % (self.credentials[0], digest, nonce, timestamp)
class GoogleLoginAuthentication(Authentication):
    """Google ClientLogin authentication (obtains an Auth token at init)."""

    def __init__(
        self, credentials, host, request_uri, headers, response, content, http
    ):
        from urllib import urlencode  # Python 2 location (urllib.parse on Py3)

        Authentication.__init__(
            self, credentials, host, request_uri, headers, response, content, http
        )
        challenge = _parse_www_authenticate(response, "www-authenticate")
        service = challenge["googlelogin"].get("service", "xapi")
        # Blogger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == "xapi" and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        # elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"

        auth = dict(
            Email=credentials[0],
            Passwd=credentials[1],
            service=service,
            source=headers["user-agent"],
        )
        # Perform the ClientLogin exchange synchronously during construction.
        resp, content = self.http.request(
            "https://www.google.com/accounts/ClientLogin",
            method="POST",
            body=urlencode(auth),
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        # Response body is "key=value" lines; pick out the Auth token.
        lines = content.split("\n")
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login rejected: carry an empty token rather than failing here.
            self.Auth = ""
        else:
            self.Auth = d["Auth"]

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers["authorization"] = "GoogleLogin Auth=" + self.Auth
# Maps the lower-cased scheme name from a WWW-Authenticate challenge to the
# Authentication subclass implementing it.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication,
}

# Preference order when a server offers several schemes (strongest first).
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.

    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """

    def __init__(
        self, cache, safe=safename
    ):  # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        # `cache` is the directory path; `safe` maps a key to a filename.
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached bytes for key, or None on a miss or read error."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # open() instead of the Python-2-only file() builtin; the
            # context manager guarantees the handle is closed even on error.
            with open(cacheFullPath, "rb") as f:
                retval = f.read()
        except IOError:
            pass
        return retval

    def set(self, key, value):
        """Write value under key, overwriting any existing entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        with open(cacheFullPath, "wb") as f:
            f.write(value)

    def delete(self, key):
        """Remove the entry for key, if present."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """A pool of (domain, name, password) credentials to try against servers.

    An empty domain means the credential applies to every host.
    """

    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        # Domains are normalized to lower case on the way in.
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        self.credentials = []

    def iter(self, domain):
        """Yield (name, password) pairs applicable to the given domain."""
        for entry in self.credentials:
            cdomain, name, password = entry
            if cdomain in ("", domain):
                yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert."""

    def add(self, key, cert, domain, password):
        # Stored as (domain, key, cert, password); domain lower-cased.
        self.credentials.append((domain.lower(), key, cert, password))

    def iter(self, domain):
        """Yield (key, cert, password) triples applicable to the domain."""
        for entry in self.credentials:
            cdomain, key, cert, password = entry
            if cdomain in ("", domain):
                yield (key, cert, password)
class AllHosts(object):
    # Sentinel assigned to ProxyInfo.bypass_hosts to mean "bypass the proxy
    # for every host" (set by proxy_info_from_url when noproxy is '*').
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""

    # Hostnames for which the proxy should be bypassed; may also be the
    # AllHosts sentinel (set by proxy_info_from_url for noproxy='*').
    bypass_hosts = ()

    def __init__(
        self,
        proxy_type,
        proxy_host,
        proxy_port,
        proxy_rdns=True,
        proxy_user=None,
        proxy_pass=None,
        proxy_headers=None,
    ):
        """Args:

        proxy_type: The type of proxy server. This must be set to one of
        socks.PROXY_TYPE_XXX constants. For example: p =
        ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost',
        proxy_port=8000)
        proxy_host: The hostname or IP address of the proxy server.
        proxy_port: The port that the proxy server is running on.
        proxy_rdns: If True (default), DNS queries will not be performed
        locally, and instead, handed to the proxy to resolve. This is useful
        if the network does not allow resolution of non-local names. In
        httplib2 0.9 and earlier, this defaulted to False.
        proxy_user: The username used to authenticate with the proxy server.
        proxy_pass: The password used to authenticate with the proxy server.
        proxy_headers: Additional or modified headers for the proxy connect
        request.
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass
        self.proxy_headers = proxy_headers

    def astuple(self):
        """Return the settings as a 7-tuple in socksocket.setproxy() order."""
        return (
            self.proxy_type,
            self.proxy_host,
            self.proxy_port,
            self.proxy_rdns,
            self.proxy_user,
            self.proxy_pass,
            self.proxy_headers,
        )

    def isgood(self):
        """True when both a proxy host and a port are configured."""
        # 'is not None' instead of '!= None': PEP 8 identity comparison, and
        # robust against values that override __eq__.
        return (self.proxy_host is not None) and (self.proxy_port is not None)

    def applies_to(self, hostname):
        """True when requests to hostname should go through the proxy."""
        return not self.bypass_host(hostname)

    def bypass_host(self, hostname):
        """Has this host been excluded from the proxy config"""
        if self.bypass_hosts is AllHosts:
            return True

        # Normalize so both '*.suffix' globs and exact names compare cleanly.
        hostname = "." + hostname.lstrip(".")
        for skip_name in self.bypass_hosts:
            # *.suffix
            if skip_name.startswith(".") and hostname.endswith(skip_name):
                return True
            # exact match
            if hostname == "." + skip_name:
                return True
        return False

    def __repr__(self):
        return (
            "<ProxyInfo type={p.proxy_type} "
            "host:port={p.proxy_host}:{p.proxy_port} rdns={p.proxy_rdns}"
            + " user={p.proxy_user} headers={p.proxy_headers}>"
        ).format(p=self)
def proxy_info_from_environment(method="http"):
    """Read proxy info from the environment variables.

    Returns a ProxyInfo built from $http_proxy / $https_proxy (or the
    upper-case spellings), or None when the scheme is unsupported or no
    variable is set.
    """
    if method not in ("http", "https"):
        return None
    env_var = method + "_proxy"
    # Lower-case variable wins; the upper-case spelling is the fallback.
    url = os.environ.get(env_var, os.environ.get(env_var.upper()))
    if not url:
        return None
    return proxy_info_from_url(url, method, None)
def proxy_info_from_url(url, method="http", noproxy=None):
    """Construct a ProxyInfo from a URL (such as http_proxy env var)
    """
    parts = urlparse.urlparse(url)
    netloc = parts[1]
    username = None
    password = None
    port = None

    # Peel off optional "user:pass@" credentials from the netloc.
    if "@" in netloc:
        ident, host_port = netloc.split("@", 1)
        if ":" in ident:
            username, password = ident.split(":", 1)
        else:
            password = ident
    else:
        host_port = netloc

    if ":" in host_port:
        host, port = host_port.split(":", 1)
    else:
        host = host_port

    # Default port comes from the scheme when none was given explicitly.
    port = int(port) if port else {"https": 443, "http": 80}[method]

    pi = ProxyInfo(
        proxy_type=3,  # socks.PROXY_TYPE_HTTP
        proxy_host=host,
        proxy_port=port,
        proxy_user=username or None,
        proxy_pass=password or None,
        proxy_headers=None,
    )

    # If not given an explicit noproxy value, respect values in env vars.
    if noproxy is None:
        noproxy = os.environ.get("no_proxy", os.environ.get("NO_PROXY", ""))
    # Special case: A single '*' character means all hosts should be bypassed.
    if noproxy == "*":
        bypass_hosts = AllHosts
    elif noproxy.strip():
        bypass_hosts = noproxy.split(",")
        bypass_hosts = filter(bool, bypass_hosts)  # To exclude empty string.
    else:
        bypass_hosts = []

    pi.bypass_hosts = bypass_hosts
    return pi
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """HTTPConnection subclass that supports timeouts

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError(
                "Proxy support missing but proxy use was requested!"
            )
        if self.proxy_info and self.proxy_info.isgood():
            use_proxy = True
            proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers = (
                self.proxy_info.astuple()
            )

            host = proxy_host
            port = proxy_port
        else:
            use_proxy = False

            host = self.host
            port = self.port

        socket_err = None

        # Try every address getaddrinfo returns until one connects.
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if use_proxy:
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(
                        proxy_type,
                        proxy_host,
                        proxy_port,
                        proxy_rdns,
                        proxy_user,
                        proxy_pass,
                        proxy_headers,
                    )
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                # End of difference from httplib.
                if self.debuglevel > 0:
                    print("connect: (%s, %s) ************" % (self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: %s ************"
                            % str(
                                (
                                    proxy_host,
                                    proxy_port,
                                    proxy_rdns,
                                    proxy_user,
                                    proxy_pass,
                                    proxy_headers,
                                )
                            )
                        )
                if use_proxy:
                    # sa[2:] appends IPv6 flowinfo/scope_id when present.
                    self.sock.connect((self.host, self.port) + sa[2:])
                else:
                    self.sock.connect(sa)
            except socket.error as e:
                # Remember the failure, clean up, and try the next address.
                socket_err = e
                if self.debuglevel > 0:
                    print("connect fail: (%s, %s)" % (self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: %s"
                            % str(
                                (
                                    proxy_host,
                                    proxy_port,
                                    proxy_rdns,
                                    proxy_user,
                                    proxy_pass,
                                    proxy_headers,
                                )
                            )
                        )
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket_err or socket.error("getaddrinfo returns an empty list")
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
    """This class allows communication via SSL.

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(
        self,
        host,
        port=None,
        key_file=None,
        cert_file=None,
        strict=None,
        timeout=None,
        proxy_info=None,
        ca_certs=None,
        disable_ssl_certificate_validation=False,
        ssl_version=None,
        key_password=None,
    ):
        if key_password:
            # NOTE(review): this branch relies on the parent exposing
            # _context (Python 3 http.client behavior) — confirm which
            # interpreters this vendored copy must support.
            httplib.HTTPSConnection.__init__(self, host, port=port, strict=strict)
            self._context.load_cert_chain(cert_file, key_file, key_password)
            self.key_file = key_file
            self.cert_file = cert_file
            self.key_password = key_password
        else:
            httplib.HTTPSConnection.__init__(
                self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict
            )
            self.key_password = None
        self.timeout = timeout
        self.proxy_info = proxy_info
        if ca_certs is None:
            # Fall back to the CA bundle shipped with the library.
            ca_certs = CA_CERTS
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
        self.ssl_version = ssl_version

    # The following two methods were adapted from https_wrapper.py, released
    # with the Google Appengine SDK at
    # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
    # under the following license:
    #
    # Copyright 2007 Google Inc.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    #

    def _GetValidHostsForCert(self, cert):
        """Returns a list of valid host globs for an SSL certificate.

        Args:
          cert: A dictionary representing an SSL certificate.
        Returns:
          list: A list of valid host globs.
        """
        # subjectAltName DNS entries take precedence over the subject CN.
        if "subjectAltName" in cert:
            return [x[1] for x in cert["subjectAltName"] if x[0].lower() == "dns"]
        else:
            return [x[0][1] for x in cert["subject"] if x[0][0].lower() == "commonname"]

    def _ValidateCertificateHostname(self, cert, hostname):
        """Validates that a given hostname is valid for an SSL certificate.

        Args:
          cert: A dictionary representing an SSL certificate.
          hostname: The hostname to test.
        Returns:
          bool: Whether or not the hostname is valid for this certificate.
        """
        hosts = self._GetValidHostsForCert(cert)
        for host in hosts:
            # Turn the certificate glob into a regex: '*' matches one label.
            host_re = host.replace(".", "\.").replace("*", "[^.]*")
            if re.search("^%s$" % (host_re,), hostname, re.I):
                return True
        return False

    def connect(self):
        "Connect to a host on a given (SSL) port."
        if self.proxy_info and self.proxy_info.isgood():
            use_proxy = True
            proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers = (
                self.proxy_info.astuple()
            )

            host = proxy_host
            port = proxy_port
        else:
            use_proxy = False

            host = self.host
            port = self.port

        socket_err = None

        address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
        for family, socktype, proto, canonname, sockaddr in address_info:
            try:
                if use_proxy:
                    sock = socks.socksocket(family, socktype, proto)

                    sock.setproxy(
                        proxy_type,
                        proxy_host,
                        proxy_port,
                        proxy_rdns,
                        proxy_user,
                        proxy_pass,
                        proxy_headers,
                    )
                else:
                    sock = socket.socket(family, socktype, proto)
                    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                if has_timeout(self.timeout):
                    sock.settimeout(self.timeout)

                if use_proxy:
                    # NOTE(review): the plain-HTTP class uses sa[2:] here;
                    # sockaddr[:2] re-appends (addr, port) instead and looks
                    # suspicious — confirm against upstream httplib2.
                    sock.connect((self.host, self.port) + sockaddr[:2])
                else:
                    sock.connect(sockaddr)
                # Wrap the raw socket in TLS before the certificate checks.
                self.sock = _ssl_wrap_socket(
                    sock,
                    self.key_file,
                    self.cert_file,
                    self.disable_ssl_certificate_validation,
                    self.ca_certs,
                    self.ssl_version,
                    self.host,
                    self.key_password,
                )
                if self.debuglevel > 0:
                    print("connect: (%s, %s)" % (self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: %s"
                            % str(
                                (
                                    proxy_host,
                                    proxy_port,
                                    proxy_rdns,
                                    proxy_user,
                                    proxy_pass,
                                    proxy_headers,
                                )
                            )
                        )
                if not self.disable_ssl_certificate_validation:
                    cert = self.sock.getpeercert()
                    # NOTE(review): maxsplit=0 means no split actually
                    # occurs; presumably intended to strip a ":port"
                    # suffix — confirm.
                    hostname = self.host.split(":", 0)[0]
                    if not self._ValidateCertificateHostname(cert, hostname):
                        raise CertificateHostnameMismatch(
                            "Server presented certificate that does not match "
                            "host %s: %s" % (hostname, cert),
                            hostname,
                            cert,
                        )
            except (
                ssl_SSLError,
                ssl_CertificateError,
                CertificateHostnameMismatch,
            ) as e:
                if sock:
                    sock.close()
                if self.sock:
                    self.sock.close()
                self.sock = None
                # Unfortunately the ssl module doesn't seem to provide any way
                # to get at more detailed error information, in particular
                # whether the error is due to certificate validation or
                # something else (such as SSL protocol mismatch).
                if getattr(e, "errno", None) == ssl.SSL_ERROR_SSL:
                    raise SSLHandshakeError(e)
                else:
                    raise
            except (socket.timeout, socket.gaierror):
                raise
            except socket.error as e:
                # Remember the failure, clean up, and try the next address.
                socket_err = e
                if self.debuglevel > 0:
                    print("connect fail: (%s, %s)" % (self.host, self.port))
                    if use_proxy:
                        print(
                            "proxy: %s"
                            % str(
                                (
                                    proxy_host,
                                    proxy_port,
                                    proxy_rdns,
                                    proxy_user,
                                    proxy_pass,
                                    proxy_headers,
                                )
                            )
                        )
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket_err or socket.error("getaddrinfo returns an empty list")
# Maps a URL scheme to the connection class used to reach it; replaced with
# the App Engine variants below when running on GAE.
SCHEME_TO_CONNECTION = {
    "http": HTTPConnectionWithTimeout,
    "https": HTTPSConnectionWithTimeout,
}
def _new_fixed_fetch(validate_certificate):
def fixed_fetch(
url,
payload=None,
method="GET",
headers={},
allow_truncated=False,
follow_redirects=True,
deadline=None,
):
return fetch(
url,
payload=payload,
method=method,
headers=headers,
allow_truncated=allow_truncated,
follow_redirects=follow_redirects,
deadline=deadline,
validate_certificate=validate_certificate,
)
return fixed_fetch
class AppEngineHttpConnection(httplib.HTTPConnection):
    """Use httplib on App Engine, but compensate for its weirdness.

    The parameters key_file, cert_file, proxy_info, ca_certs,
    disable_ssl_certificate_validation, and ssl_version are all dropped on
    the ground.
    """

    def __init__(
        self,
        host,
        port=None,
        key_file=None,
        cert_file=None,
        strict=None,
        timeout=None,
        proxy_info=None,
        ca_certs=None,
        disable_ssl_certificate_validation=False,
        ssl_version=None,
    ):
        # Only host/port/strict/timeout are honoured; the TLS/proxy
        # parameters exist purely for signature compatibility with the
        # socket-based connection classes.
        httplib.HTTPConnection.__init__(
            self, host, port=port, strict=strict, timeout=timeout
        )
class AppEngineHttpsConnection(httplib.HTTPSConnection):
    """Same as AppEngineHttpConnection, but for HTTPS URIs.

    The parameters proxy_info, ca_certs, disable_ssl_certificate_validation,
    and ssl_version are all dropped on the ground.
    """

    def __init__(
        self,
        host,
        port=None,
        key_file=None,
        cert_file=None,
        strict=None,
        timeout=None,
        proxy_info=None,
        ca_certs=None,
        disable_ssl_certificate_validation=False,
        ssl_version=None,
        key_password=None,
    ):
        # urlfetch cannot load a password-protected client key.
        if key_password:
            raise NotSupportedOnThisPlatform("Certificate with password is not supported.")
        httplib.HTTPSConnection.__init__(
            self,
            host,
            port=port,
            key_file=key_file,
            cert_file=cert_file,
            strict=strict,
            timeout=timeout,
        )
        # Bind a urlfetch wrapper with certificate validation pre-configured.
        self._fetch = _new_fixed_fetch(not disable_ssl_certificate_validation)
# Use a different connection object for Google App Engine Standard Environment.
def is_gae_instance():
    """Return True when running inside Google App Engine (including the
    local development server and testutil environments).

    Detection is based on the SERVER_SOFTWARE environment variable, which
    GAE sets to 'Google App Engine/...', 'Development/...' or 'testutil/...'.
    """
    server_software = os.environ.get('SERVER_SOFTWARE', '')
    # str.startswith accepts a tuple, replacing the chained 'or' checks.
    return server_software.startswith(
        ('Google App Engine/', 'Development/', 'testutil/')
    )
try:
    # Only swap in the App Engine connection classes when actually running
    # on GAE with the urlfetch stub registered; otherwise keep the
    # socket-based classes installed above.
    if not is_gae_instance():
        raise NotRunningAppEngineEnvironment()

    from google.appengine.api import apiproxy_stub_map

    if apiproxy_stub_map.apiproxy.GetStub("urlfetch") is None:
        raise ImportError

    from google.appengine.api.urlfetch import fetch

    # Update the connection classes to use the Google App Engine specific ones.
    SCHEME_TO_CONNECTION = {
        "http": AppEngineHttpConnection,
        "https": AppEngineHttpsConnection,
    }
except (ImportError, NotRunningAppEngineEnvironment):
    # Not on App Engine (or urlfetch unavailable): nothing to change.
    pass
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
    def __init__(
        self,
        cache=None,
        timeout=None,
        proxy_info=proxy_info_from_environment,
        ca_certs=None,
        disable_ssl_certificate_validation=False,
        ssl_version=None,
    ):
        """If 'cache' is a string then it is used as a directory name for
        a disk cache. Otherwise it must be an object that supports the
        same interface as FileCache.

        All timeouts are in seconds. If None is passed for timeout
        then Python's default timeout for sockets will be used. See
        for example the docs of socket.setdefaulttimeout():
        http://docs.python.org/library/socket.html#socket.setdefaulttimeout

        `proxy_info` may be:
          - a callable that takes the http scheme ('http' or 'https') and
            returns a ProxyInfo instance per request. By default, uses
            proxy_nfo_from_environment.
          - a ProxyInfo instance (static proxy config).
          - None (proxy disabled).

        ca_certs is the path of a file containing root CA certificates for SSL
        server certificate validation. By default, a CA cert file bundled with
        httplib2 is used.

        If disable_ssl_certificate_validation is true, SSL cert validation will
        not be performed.

        By default, ssl.PROTOCOL_SSLv23 will be used for the ssl version.
        """
        self.proxy_info = proxy_info
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
        self.ssl_version = ssl_version

        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, basestring):  # Python 2 str/unicode
            self.cache = FileCache(cache)
        else:
            self.cache = cache

        # Name/password
        self.credentials = Credentials()

        # Key/cert
        self.certificates = KeyCerts()

        # authorization objects
        self.authorizations = []

        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True

        self.redirect_codes = REDIRECT_CODES

        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT", "PATCH"]

        self.safe_methods = list(SAFE_METHODS)

        # If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
        self.follow_all_redirects = False

        self.ignore_etag = False

        self.force_exception_to_status_code = False

        self.timeout = timeout

        # Keep Authorization: headers on a redirect.
        self.forward_authorization_headers = False
def close(self):
    """Shut down every cached connection and wipe sensitive state.

    Not thread-safe; callers must serialize this against any in-flight
    requests on the same Http instance.
    """
    # Swap in a fresh map first so a concurrent lookup cannot hand out a
    # connection we are about to close.
    old_connections, self.connections = self.connections, {}
    for _key, connection in old_connections.iteritems():
        connection.close()
    self.certificates.clear()
    self.clear_credentials()
def __getstate__(self):
    """Return picklable state.

    Live connections and any foreign ``request`` attribute (attached by
    credential/auth helpers) are stripped, since neither can be pickled.
    """
    state_dict = copy.copy(self.__dict__)
    state_dict.pop("request", None)
    state_dict.pop("connections", None)
    return state_dict
def __setstate__(self, state):
    # Restore pickled attributes; the connection pool is never pickled
    # (see __getstate__), so always start with an empty one.
    self.__dict__.update(state)
    self.connections = {}
def _auth_from_challenge(self, host, request_uri, headers, response, content):
    """A generator that creates Authorization objects
    that can be applied to requests.
    """
    # Parse the WWW-Authenticate header into a mapping of scheme -> params.
    challenges = _parse_www_authenticate(response, "www-authenticate")
    # For every stored credential applicable to this host, yield one
    # authorization object per challenged scheme, in our preference order
    # (AUTH_SCHEME_ORDER).
    for cred in self.credentials.iter(host):
        for scheme in AUTH_SCHEME_ORDER:
            if scheme in challenges:
                yield AUTH_SCHEME_CLASSES[scheme](
                    cred, host, request_uri, headers, response, content, self
                )
def add_credentials(self, name, password, domain=""):
    """Add a name and password that will be used
    any time a request requires authentication.

    The pair is stored on this instance's Credentials store, scoped by
    `domain` (empty string is the stored default — confirm exact matching
    semantics against Credentials.iter).
    """
    self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain, password=None):
    """Add a key and cert that will be used
    any time a request requires authentication.

    `key`/`cert` are later handed to the HTTPS connection as
    key_file/cert_file (see request()); `password`, if given, is passed
    as key_password.
    """
    self.certificates.add(key, cert, domain, password)
def clear_credentials(self):
    """Forget every stored name/password pair and drop any cached
    authorization state that was derived from them."""
    # The two resets are independent; order does not matter.
    self.authorizations = []
    self.credentials.clear()
def _conn_request(self, conn, request_uri, method, body, headers):
    """Issue one HTTP request on `conn`, retrying transient failures.

    Retries up to RETRIES times on reconnectable errors; returns
    (Response, content). `content` is "" for HEAD requests.
    """
    i = 0
    seen_bad_status_line = False
    while i < RETRIES:
        i += 1
        try:
            # Reconnect if the underlying socket was closed (stale keep-alive).
            if hasattr(conn, "sock") and conn.sock is None:
                conn.connect()
            conn.request(method, request_uri, body, headers)
        except socket.timeout:
            # Timeouts are never retried here; the caller decides.
            raise
        except socket.gaierror:
            # DNS resolution failed — rebrand as a httplib2 error.
            conn.close()
            raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
        except ssl_SSLError:
            conn.close()
            raise
        except socket.error as e:
            # errno lives in different places depending on the exception shape.
            err = 0
            if hasattr(e, "args"):
                err = getattr(e, "args")[0]
            else:
                err = e.errno
            if err == errno.ECONNREFUSED:  # Connection refused
                raise
            if err in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES:
                continue  # retry on potentially transient socket errors
        except httplib.HTTPException:
            # Just because the server closed the connection doesn't apparently mean
            # that the server didn't send a response.
            if hasattr(conn, "sock") and conn.sock is None:
                if i < RETRIES - 1:
                    conn.close()
                    conn.connect()
                    continue
                else:
                    conn.close()
                    raise
            if i < RETRIES - 1:
                conn.close()
                conn.connect()
                continue
        try:
            response = conn.getresponse()
        except httplib.BadStatusLine:
            # If we get a BadStatusLine on the first try then that means
            # the connection just went stale, so retry regardless of the
            # number of RETRIES set.
            if not seen_bad_status_line and i == 1:
                i = 0
                seen_bad_status_line = True
                conn.close()
                conn.connect()
                continue
            else:
                conn.close()
                raise
        except (socket.error, httplib.HTTPException):
            if i < RETRIES - 1:
                conn.close()
                conn.connect()
                continue
            else:
                conn.close()
                raise
        else:
            # Success path: read the body (except for HEAD) and wrap the
            # raw httplib response in our Response mapping.
            content = ""
            if method == "HEAD":
                conn.close()
            else:
                content = response.read()
            response = Response(response)
            if method != "HEAD":
                content = _decompressContent(response, content)

        break
    return (response, content)
def _request(
    self,
    conn,
    host,
    absolute_uri,
    request_uri,
    method,
    body,
    headers,
    redirections,
    cachekey,
):
    """Do the actual request using the connection object
    and also follow one level of redirects if necessary"""
    # Pick the most specific (deepest-scoped) stored authorization that
    # applies to this host/uri, if any.
    auths = [
        (auth.depth(request_uri), auth)
        for auth in self.authorizations
        if auth.inscope(host, request_uri)
    ]
    auth = auths and sorted(auths)[0][1] or None
    if auth:
        auth.request(method, request_uri, headers, body)

    (response, content) = self._conn_request(conn, request_uri, method, body, headers)

    if auth:
        # Give the auth object a chance to react (e.g. stale digest nonce)
        # and replay the request once.
        if auth.response(response, body):
            auth.request(method, request_uri, headers, body)
            (response, content) = self._conn_request(
                conn, request_uri, method, body, headers
            )
            response._stale_digest = 1

    if response.status == 401:
        # Try each credential/scheme combination until one is accepted.
        for authorization in self._auth_from_challenge(
            host, request_uri, headers, response, content
        ):
            authorization.request(method, request_uri, headers, body)
            (response, content) = self._conn_request(
                conn, request_uri, method, body, headers
            )
            if response.status != 401:
                self.authorizations.append(authorization)
                authorization.response(response, body)
                break

    if (
        self.follow_all_redirects
        or method in self.safe_methods
        or response.status in (303, 308)
    ):
        if self.follow_redirects and response.status in self.redirect_codes:
            # Pick out the location header and basically start from the beginning
            # remembering first to strip the ETag header and decrement our 'depth'
            if redirections:
                if "location" not in response and response.status != 300:
                    raise RedirectMissingLocation(
                        _(
                            "Redirected but the response is missing a Location: header."
                        ),
                        response,
                        content,
                    )
                # Fix-up relative redirects (which violate an RFC 2616 MUST)
                if "location" in response:
                    location = response["location"]
                    (scheme, authority, path, query, fragment) = parse_uri(location)
                    if authority == None:
                        response["location"] = urlparse.urljoin(
                            absolute_uri, location
                        )
                # Permanent redirects are remembered in the cache so future
                # requests can short-circuit straight to the new URL.
                if response.status == 308 or (response.status == 301 and method in self.safe_methods):
                    response["-x-permanent-redirect-url"] = response["location"]
                    if "content-location" not in response:
                        response["content-location"] = absolute_uri
                    _updateCache(headers, response, content, self.cache, cachekey)
                # Strip conditional headers so the redirected request is
                # evaluated fresh at the new location.
                if "if-none-match" in headers:
                    del headers["if-none-match"]
                if "if-modified-since" in headers:
                    del headers["if-modified-since"]
                # Don't leak credentials to the redirect target unless
                # explicitly allowed.
                if (
                    "authorization" in headers
                    and not self.forward_authorization_headers
                ):
                    del headers["authorization"]
                if "location" in response:
                    location = response["location"]
                    old_response = copy.deepcopy(response)
                    if "content-location" not in old_response:
                        old_response["content-location"] = absolute_uri
                    redirect_method = method
                    if response.status in [302, 303]:
                        # 302/303 redirects switch to GET with no body.
                        redirect_method = "GET"
                        body = None
                    (response, content) = self.request(
                        location,
                        method=redirect_method,
                        body=body,
                        headers=headers,
                        redirections=redirections - 1,
                    )
                    response.previous = old_response
            else:
                raise RedirectLimit(
                    "Redirected more times than rediection_limit allows.",
                    response,
                    content,
                )
        elif response.status in [200, 203] and method in self.safe_methods:
            # Don't cache 206's since we aren't going to handle byte range requests
            if "content-location" not in response:
                response["content-location"] = absolute_uri
            _updateCache(headers, response, content, self.cache, cachekey)

    return (response, content)
def _normalize_headers(self, headers):
    # Thin instance-level wrapper delegating to the module-level
    # _normalize_headers helper of the same name.
    return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(
    self,
    uri,
    method="GET",
    body=None,
    headers=None,
    redirections=DEFAULT_MAX_REDIRECTS,
    connection_type=None,
):
    """ Performs a single HTTP request.

    The 'uri' is the URI of the HTTP resource and can begin with either
    'http' or 'https'. The value of 'uri' must be an absolute URI.

    The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
    etc. There is no restriction on the methods allowed.

    The 'body' is the entity body to be sent with the request. It is a
    string object.

    Any extra headers that are to be sent with the request should be
    provided in the 'headers' dictionary.

    The maximum number of redirect to follow before raising an
    exception is 'redirections. The default is 5.

    The return value is a tuple of (response, content), the first
    being and instance of the 'Response' class, the second being
    a string that contains the response entity body.
    """
    conn_key = ''
    try:
        if headers is None:
            headers = {}
        else:
            headers = self._normalize_headers(headers)

        if "user-agent" not in headers:
            headers["user-agent"] = "Python-httplib2/%s (gzip)" % __version__

        uri = iri2uri(uri)
        # Prevent CWE-75 space injection to manipulate request via part of uri.
        # Prevent CWE-93 CRLF injection to modify headers via part of uri.
        uri = uri.replace(" ", "%20").replace("\r", "%0D").replace("\n", "%0A")

        (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)

        proxy_info = self._get_proxy_info(scheme, authority)

        # One pooled connection per scheme+authority pair.
        conn_key = scheme + ":" + authority
        conn = self.connections.get(conn_key)
        if conn is None:
            if not connection_type:
                connection_type = SCHEME_TO_CONNECTION[scheme]
            certs = list(self.certificates.iter(authority))
            if scheme == "https":
                if certs:
                    conn = self.connections[conn_key] = connection_type(
                        authority,
                        key_file=certs[0][0],
                        cert_file=certs[0][1],
                        timeout=self.timeout,
                        proxy_info=proxy_info,
                        ca_certs=self.ca_certs,
                        disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
                        ssl_version=self.ssl_version,
                        key_password=certs[0][2],
                    )
                else:
                    conn = self.connections[conn_key] = connection_type(
                        authority,
                        timeout=self.timeout,
                        proxy_info=proxy_info,
                        ca_certs=self.ca_certs,
                        disable_ssl_certificate_validation=self.disable_ssl_certificate_validation,
                        ssl_version=self.ssl_version,
                    )
            else:
                conn = self.connections[conn_key] = connection_type(
                    authority, timeout=self.timeout, proxy_info=proxy_info
                )
            conn.set_debuglevel(debuglevel)

        if "range" not in headers and "accept-encoding" not in headers:
            headers["accept-encoding"] = "gzip, deflate"

        info = email.Message.Message()
        cachekey = None
        cached_value = None
        if self.cache:
            cachekey = defrag_uri.encode("utf-8")
            cached_value = self.cache.get(cachekey)
            if cached_value:
                # info = email.message_from_string(cached_value)
                #
                # Need to replace the line above with the kludge below
                # to fix the non-existent bug not fixed in this
                # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                try:
                    info, content = cached_value.split("\r\n\r\n", 1)
                    feedparser = email.FeedParser.FeedParser()
                    feedparser.feed(info)
                    info = feedparser.close()
                    feedparser._parse = None
                except (IndexError, ValueError):
                    # Corrupt cache entry: drop it and behave as uncached.
                    self.cache.delete(cachekey)
                    cachekey = None
                    cached_value = None

        if (
            method in self.optimistic_concurrency_methods
            and self.cache
            and "etag" in info
            and not self.ignore_etag
            and "if-match" not in headers
        ):
            # http://www.w3.org/1999/04/Editing/
            headers["if-match"] = info["etag"]

        # https://tools.ietf.org/html/rfc7234
        # A cache MUST invalidate the effective Request URI as well as [...] Location and Content-Location
        # when a non-error status code is received in response to an unsafe request method.
        if self.cache and cachekey and method not in self.safe_methods:
            self.cache.delete(cachekey)

        # Check the vary header in the cache to see if this request
        # matches what varies in the cache.
        if method in self.safe_methods and "vary" in info:
            vary = info["vary"]
            vary_headers = vary.lower().replace(" ", "").split(",")
            for header in vary_headers:
                key = "-varied-%s" % header
                value = info[key]
                if headers.get(header, None) != value:
                    cached_value = None
                    break

        if (
            self.cache
            and cached_value
            and (method in self.safe_methods or info["status"] == "308")
            and "range" not in headers
        ):
            redirect_method = method
            if info["status"] not in ("307", "308"):
                redirect_method = "GET"
            if "-x-permanent-redirect-url" in info:
                # Should cached permanent redirects be counted in our redirection count? For now, yes.
                if redirections <= 0:
                    raise RedirectLimit(
                        "Redirected more times than rediection_limit allows.",
                        {},
                        "",
                    )
                (response, new_content) = self.request(
                    info["-x-permanent-redirect-url"],
                    method=redirect_method,
                    headers=headers,
                    redirections=redirections - 1,
                )
                response.previous = Response(info)
                response.previous.fromcache = True
            else:
                # Determine our course of action:
                #   Is the cached entry fresh or stale?
                #   Has the client requested a non-cached response?
                #
                # There seems to be three possible answers:
                # 1. [FRESH] Return the cache entry w/o doing a GET
                # 2. [STALE] Do the GET (but add in cache validators if available)
                # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                entry_disposition = _entry_disposition(info, headers)
                if entry_disposition == "FRESH":
                    if not cached_value:
                        info["status"] = "504"
                        content = ""
                    response = Response(info)
                    if cached_value:
                        response.fromcache = True
                    return (response, content)

                if entry_disposition == "STALE":
                    if (
                        "etag" in info
                        and not self.ignore_etag
                        and not "if-none-match" in headers
                    ):
                        headers["if-none-match"] = info["etag"]
                    if "last-modified" in info and not "last-modified" in headers:
                        headers["if-modified-since"] = info["last-modified"]
                elif entry_disposition == "TRANSPARENT":
                    pass

                (response, new_content) = self._request(
                    conn,
                    authority,
                    uri,
                    request_uri,
                    method,
                    body,
                    headers,
                    redirections,
                    cachekey,
                )

            if response.status == 304 and method == "GET":
                # Rewrite the cache entry with the new end-to-end headers
                # Take all headers that are in response
                # and overwrite their values in info.
                # unless they are hop-by-hop, or are listed in the connection header.
                for key in _get_end2end_headers(response):
                    info[key] = response[key]
                merged_response = Response(info)
                if hasattr(response, "_stale_digest"):
                    merged_response._stale_digest = response._stale_digest
                _updateCache(
                    headers, merged_response, content, self.cache, cachekey
                )
                response = merged_response
                response.status = 200
                response.fromcache = True
            elif response.status == 200:
                content = new_content
            else:
                self.cache.delete(cachekey)
                content = new_content
        else:
            cc = _parse_cache_control(headers)
            if "only-if-cached" in cc:
                # Client demanded a cached answer but we have none: 504.
                info["status"] = "504"
                response = Response(info)
                content = ""
            else:
                (response, content) = self._request(
                    conn,
                    authority,
                    uri,
                    request_uri,
                    method,
                    body,
                    headers,
                    redirections,
                    cachekey,
                )
    except Exception as e:
        # Timed-out connections are evicted from the pool so they are not
        # reused in a broken state.
        is_timeout = isinstance(e, socket.timeout)
        if is_timeout:
            conn = self.connections.pop(conn_key, None)
            if conn:
                conn.close()

        if self.force_exception_to_status_code:
            # Convert the failure into a synthetic Response instead of raising.
            if isinstance(e, HttpLib2ErrorWithResponse):
                response = e.response
                content = e.content
                response.status = 500
                response.reason = str(e)
            elif is_timeout:
                content = "Request Timeout"
                response = Response(
                    {
                        "content-type": "text/plain",
                        "status": "408",
                        "content-length": len(content),
                    }
                )
                response.reason = "Request Timeout"
            else:
                content = str(e)
                response = Response(
                    {
                        "content-type": "text/plain",
                        "status": "400",
                        "content-length": len(content),
                    }
                )
                response.reason = "Bad Request"
        else:
            raise

    return (response, content)
def _get_proxy_info(self, scheme, authority):
    """Return a ProxyInfo instance (or None) based on the scheme
    and authority.
    """
    host, _port = urllib.splitport(authority)
    info = self.proxy_info
    if callable(info):
        # Per-request resolution: the callable picks a proxy for this scheme.
        info = info(scheme)
    if hasattr(info, "applies_to") and not info.applies_to(host):
        info = None
    return info
class Response(dict):
    """An object more like email.Message than httplib.HTTPResponse."""

    # Whether this response was served from the local cache.
    fromcache = False
    # HTTP protocol version used by the server: 10 = HTTP/1.0, 11 = HTTP/1.1.
    version = 11
    # Status code returned by the server.
    status = 200
    # Reason phrase returned by the server.
    reason = "Ok"
    # Previous Response in a redirect chain, if any.
    previous = None

    def __init__(self, info):
        # 'info' may be an httplib.HTTPResponse, an email.Message, or a
        # plain mapping; header names are lower-cased in every case.
        if isinstance(info, httplib.HTTPResponse):
            for key, value in info.getheaders():
                self[key.lower()] = value
            self.status = info.status
            self["status"] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for key, value in info.items():
                self[key.lower()] = value
            self.status = int(self["status"])
        else:
            for key, value in info.iteritems():
                self[key.lower()] = value
            self.status = int(self.get("status", self.status))
            self.reason = self.get("reason", self.reason)

    def __getattr__(self, name):
        # Expose the mapping itself as '.dict' for backwards compatibility;
        # anything else is a genuine missing attribute.
        if name != "dict":
            raise AttributeError(name)
        return self
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_1890_0 |
crossvul-python_data_bad_4542_0 | """
This contains a bunch of RFC7230 definitions and regular expressions that are
needed to properly parse HTTP messages.
"""
import re
from .compat import tobytes
WS = "[ \t]"
OWS = WS + "{0,}?"
RWS = WS + "{1,}?"
BWS = OWS

# RFC 7230 Section 3.2.6 "Field Value Components":
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
#    / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
#    / DIGIT / ALPHA
# obs-text = %x80-FF
TCHAR = r"[!#$%&'*+\-.^_`|~0-9A-Za-z]"
OBS_TEXT = r"\x80-\xff"

TOKEN = TCHAR + "{1,}"

# RFC 5234 Appendix B.1 "Core Rules":
# VCHAR = %x21-7E
#    ; visible (printing) characters
VCHAR = r"\x21-\x7e"

# header-field   = field-name ":" OWS field-value OWS
# field-name     = token
# field-value    = *( field-content / obs-fold )
# field-content  = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar    = VCHAR / obs-text
#
# Errata from: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189
# changes field-content to:
#
# field-content  = field-vchar [ 1*( SP / HTAB / field-vchar )
#                  field-vchar ]
FIELD_VCHAR = "[" + VCHAR + OBS_TEXT + "]"
# SECURITY: FIELD_CONTENT must not nest an optional group inside a repeated
# group; the previous formulation
#   FIELD_VCHAR + "(...+" + FIELD_VCHAR + "){,1}"   repeated with "{0,}"
# was ambiguous and allowed catastrophic regex backtracking (ReDoS,
# CVE-2020-5236) on crafted header values. This unambiguous equivalent
# matches runs of field characters separated by whitespace, and is more
# greedy than the ABNF in that it matches the whole value at once.
FIELD_CONTENT = FIELD_VCHAR + "+(?:[ \t]+" + FIELD_VCHAR + "+)*"
# The field value is simply an optional field-content (may be empty).
FIELD_VALUE = "(?:" + FIELD_CONTENT + ")?"

HEADER_FIELD = re.compile(
    tobytes(
        "^(?P<name>" + TOKEN + "):" + OWS + "(?P<value>" + FIELD_VALUE + ")" + OWS + "$"
    )
)

OWS_STRIP = re.compile(OWS + "(?P<value>.*?)" + OWS)
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_4542_0 |
crossvul-python_data_good_4373_2 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import re
from typing import Optional, Tuple, Type
import synapse
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import (
FEDERATION_UNSTABLE_PREFIX,
FEDERATION_V1_PREFIX,
FEDERATION_V2_PREFIX,
)
from synapse.http.endpoint import parse_and_validate_server_name
from synapse.http.server import JsonResource
from synapse.http.servlet import (
parse_boolean_from_args,
parse_integer_from_args,
parse_json_object_from_request,
parse_string_from_args,
)
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
start_active_span,
start_active_span_from_request,
tags,
whitelisted_homeserver,
)
from synapse.server import HomeServer
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
class TransportLayerServer(JsonResource):
    """Handles incoming federation HTTP requests"""

    def __init__(self, hs, servlet_groups=None):
        """Initialize the TransportLayerServer

        Will by default register all servlets. For custom behaviour, pass in
        a list of servlet_groups to register.

        Args:
            hs (synapse.server.HomeServer): homeserver
            servlet_groups (list[str], optional): List of servlet groups to register.
                Defaults to ``DEFAULT_SERVLET_GROUPS``.
        """
        self.hs = hs
        self.clock = hs.get_clock()
        self.servlet_groups = servlet_groups

        super().__init__(hs, canonical_json=False)

        self.authenticator = Authenticator(hs)
        # Shared ratelimiter so every federation servlet draws from the same
        # per-origin bucket.
        self.ratelimiter = hs.get_federation_ratelimiter()

        self.register_servlets()

    def register_servlets(self):
        # Delegate to the module-level register_servlets(), passing this
        # resource so each servlet registers its paths on it.
        register_servlets(
            self.hs,
            resource=self,
            ratelimiter=self.ratelimiter,
            authenticator=self.authenticator,
            servlet_groups=self.servlet_groups,
        )
class AuthenticationError(SynapseError):
    """There was a problem authenticating the request"""
class NoAuthenticationError(AuthenticationError):
    """The request had no authentication information"""
class Authenticator:
    """Verifies the X-Matrix signature on incoming federation requests."""

    def __init__(self, hs: HomeServer):
        self._clock = hs.get_clock()
        self.keyring = hs.get_keyring()
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.federation_domain_whitelist = hs.config.federation_domain_whitelist
        self.notifier = hs.get_notifier()

        # Only set when running as a worker; used to tell the master that a
        # remote server is reachable again.
        self.replication_client = None
        if hs.config.worker.worker_app:
            self.replication_client = hs.get_tcp_replication()

    # A method just so we can pass 'self' as the authenticator to the Servlets
    async def authenticate_request(self, request, content):
        """Check the request's X-Matrix Authorization header(s).

        Returns the authenticated origin server name, or raises
        NoAuthenticationError / FederationDeniedError / a signature
        verification failure from the keyring.
        """
        now = self._clock.time_msec()
        # Reconstruct the canonical JSON object that the origin signed.
        json_request = {
            "method": request.method.decode("ascii"),
            "uri": request.uri.decode("ascii"),
            "destination": self.server_name,
            "signatures": {},
        }

        if content is not None:
            json_request["content"] = content

        origin = None

        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")

        if not auth_headers:
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED
            )

        # Collect every X-Matrix signature supplied on the request.
        for auth in auth_headers:
            if auth.startswith(b"X-Matrix"):
                (origin, key, sig) = _parse_auth_header(auth)
                json_request["origin"] = origin
                json_request["signatures"].setdefault(origin, {})[key] = sig

        # If a federation whitelist is configured, reject origins not on it.
        if (
            self.federation_domain_whitelist is not None
            and origin not in self.federation_domain_whitelist
        ):
            raise FederationDeniedError(origin)

        if not json_request["signatures"]:
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED
            )

        await self.keyring.verify_json_for_server(
            origin, json_request, now, "Incoming request"
        )

        logger.debug("Request from %s", origin)
        request.requester = origin

        # If we get a valid signed request from the other side, its probably
        # alive
        retry_timings = await self.store.get_destination_retry_timings(origin)
        if retry_timings and retry_timings["retry_last_ts"]:
            run_in_background(self._reset_retry_timings, origin)

        return origin

    async def _reset_retry_timings(self, origin):
        # Best-effort: failures here must not affect request handling.
        try:
            logger.info("Marking origin %r as up", origin)
            await self.store.set_destination_retry_timings(origin, None, 0, 0)

            # Inform the relevant places that the remote server is back up.
            self.notifier.notify_remote_server_up(origin)

            if self.replication_client:
                # If we're on a worker we try and inform master about this. The
                # replication client doesn't hook into the notifier to avoid
                # infinite loops where we send a `REMOTE_SERVER_UP` command to
                # master, which then echoes it back to us which in turn pokes
                # the notifier.
                self.replication_client.send_remote_server_up(origin)
        except Exception:
            logger.exception("Error resetting retry timings on %s", origin)
def _parse_auth_header(header_bytes):
    """Parse an X-Matrix auth header

    Args:
        header_bytes (bytes): header value

    Returns:
        Tuple[str, str, str]: origin, key id, signature.

    Raises:
        AuthenticationError if the header could not be parsed
    """
    try:
        header_str = header_bytes.decode("utf-8")
        params = header_str.split(" ")[1].split(",")
        # Split each key=value pair on the *first* '=' only: a signature
        # value may itself contain '=' (e.g. base64 padding), and a bare
        # kv.split("=") would then yield more than two fields and make the
        # dict() constructor raise, rejecting an otherwise valid header.
        param_dict = dict(kv.split("=", 1) for kv in params)

        def strip_quotes(value):
            if value.startswith('"'):
                return value[1:-1]
            else:
                return value

        origin = strip_quotes(param_dict["origin"])

        # ensure that the origin is a valid server name
        parse_and_validate_server_name(origin)

        key = strip_quotes(param_dict["key"])
        sig = strip_quotes(param_dict["sig"])
        return origin, key, sig
    except Exception as e:
        logger.warning(
            "Error parsing auth header '%s': %s",
            header_bytes.decode("ascii", "replace"),
            e,
        )
        raise AuthenticationError(
            400, "Malformed Authorization header", Codes.UNAUTHORIZED
        )
class BaseFederationServlet:
    """Abstract base class for federation servlet classes.

    The servlet object should have a PATH attribute which takes the form of a regexp to
    match against the request path (excluding the /federation/v1 prefix).

    The servlet should also implement one or more of on_GET, on_POST, on_PUT, to match
    the appropriate HTTP method. These methods must be *asynchronous* and have the
    signature:

        on_<METHOD>(self, origin, content, query, **kwargs)

        With arguments:

            origin (unicode|None): The authenticated server_name of the calling server,
                unless REQUIRE_AUTH is set to False and authentication failed.

            content (unicode|None): decoded json body of the request. None if the
                request was a GET.

            query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
                (ie, '+' and '%xx' are decoded) but note that it is *not* utf8-decoded
                yet.

            **kwargs (dict[unicode, unicode]): the dict mapping keys to path
                components as specified in the path match regexp.

        Returns:
            Optional[Tuple[int, object]]: either (response code, response object) to
                 return a JSON response, or None if the request has already been handled.

        Raises:
            SynapseError: to return an error code

            Exception: other exceptions will be caught, logged, and a 500 will be
                returned.
    """

    PATH = ""  # Overridden in subclasses, the regex to match against the path.

    REQUIRE_AUTH = True

    PREFIX = FEDERATION_V1_PREFIX  # Allows specifying the API version

    RATELIMIT = True  # Whether to rate limit requests or not

    def __init__(self, handler, authenticator, ratelimiter, server_name):
        # NOTE(review): server_name is accepted but not stored here;
        # subclasses that need it (e.g. FederationSendServlet) keep their own
        # copy — confirm this is intentional.
        self.handler = handler
        self.authenticator = authenticator
        self.ratelimiter = ratelimiter

    def _wrap(self, func):
        # Bind these as locals so new_func does not need self for them.
        authenticator = self.authenticator
        ratelimiter = self.ratelimiter

        @functools.wraps(func)
        async def new_func(request, *args, **kwargs):
            """A callback which can be passed to HttpServer.RegisterPaths

            Args:
                request (twisted.web.http.Request):
                *args: unused?
                **kwargs (dict[unicode, unicode]): the dict mapping keys to path
                    components as specified in the path match regexp.

            Returns:
                Tuple[int, object]|None: (response code, response object) as returned by
                    the callback method. None if the request has already been handled.
            """
            content = None
            if request.method in [b"PUT", b"POST"]:
                # TODO: Handle other method types? other content types?
                content = parse_json_object_from_request(request)

            try:
                origin = await authenticator.authenticate_request(request, content)
            except NoAuthenticationError:
                origin = None
                if self.REQUIRE_AUTH:
                    logger.warning(
                        "authenticate_request failed: missing authentication"
                    )
                    raise
            except Exception as e:
                logger.warning("authenticate_request failed: %s", e)
                raise

            # Tags for the tracing span covering this request.
            request_tags = {
                "request_id": request.get_request_id(),
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
                tags.HTTP_METHOD: request.get_method(),
                tags.HTTP_URL: request.get_redacted_uri(),
                tags.PEER_HOST_IPV6: request.getClientIP(),
                "authenticated_entity": origin,
                "servlet_name": request.request_metrics.name,
            }

            # Only accept the span context if the origin is authenticated
            # and whitelisted
            if origin and whitelisted_homeserver(origin):
                scope = start_active_span_from_request(
                    request, "incoming-federation-request", tags=request_tags
                )
            else:
                scope = start_active_span(
                    "incoming-federation-request", tags=request_tags
                )

            with scope:
                if origin and self.RATELIMIT:
                    with ratelimiter.ratelimit(origin) as d:
                        await d
                        if request._disconnected:
                            logger.warning(
                                "client disconnected before we started processing "
                                "request"
                            )
                            return -1, None
                        response = await func(
                            origin, content, request.args, *args, **kwargs
                        )
                else:
                    response = await func(
                        origin, content, request.args, *args, **kwargs
                    )

            return response

        return new_func

    def register(self, server):
        # Anchor the servlet's PATH regex to the full (versioned) path.
        pattern = re.compile("^" + self.PREFIX + self.PATH + "$")

        for method in ("GET", "PUT", "POST"):
            code = getattr(self, "on_%s" % (method), None)
            if code is None:
                continue

            server.register_paths(
                method, (pattern,), self._wrap(code), self.__class__.__name__,
            )
class FederationSendServlet(BaseFederationServlet):
    """Receives transactions (batches of PDUs/EDUs) from remote servers."""

    PATH = "/send/(?P<transaction_id>[^/]*)/?"

    # We ratelimit manually in the handler as we queue up the requests and we
    # don't want to fill up the ratelimiter with blocked requests.
    RATELIMIT = False

    def __init__(self, handler, server_name, **kwargs):
        super().__init__(handler, server_name=server_name, **kwargs)
        self.server_name = server_name

    # This is when someone is trying to send us a bunch of data.
    async def on_PUT(self, origin, content, query, transaction_id):
        """ Called on PUT /send/<transaction_id>/

        Args:
            request (twisted.web.http.Request): The HTTP request.
            transaction_id (str): The transaction_id associated with this
                request. This is *not* None.

        Returns:
            Tuple of `(code, response)`, where
            `response` is a python dict to be converted into JSON that is
            used as the response body.
        """
        # Parse the request
        try:
            transaction_data = content

            logger.debug("Decoded %s: %s", transaction_id, str(transaction_data))

            logger.info(
                "Received txn %s from %s. (PDUs: %d, EDUs: %d)",
                transaction_id,
                origin,
                len(transaction_data.get("pdus", [])),
                len(transaction_data.get("edus", [])),
            )

            # We should ideally be getting this from the security layer.
            # origin = body["origin"]

            # Add some extra data to the transaction dict that isn't included
            # in the request body.
            transaction_data.update(
                transaction_id=transaction_id, destination=self.server_name
            )

        except Exception as e:
            logger.exception(e)
            return 400, {"error": "Invalid transaction"}

        try:
            code, response = await self.handler.on_incoming_transaction(
                origin, transaction_data
            )
        except Exception:
            logger.exception("on_incoming_transaction failed")
            raise

        return code, response
class FederationEventServlet(BaseFederationServlet):
    PATH = "/event/(?P<event_id>[^/]*)/?"

    # Serve a single PDU to the requesting server.
    async def on_GET(self, origin, content, query, event_id):
        result = await self.handler.on_pdu_request(origin, event_id)
        return result
class FederationStateV1Servlet(BaseFederationServlet):
    PATH = "/state/(?P<room_id>[^/]*)/?"

    # Serve the full state of a room, optionally at a specific event.
    async def on_GET(self, origin, content, query, room_id):
        event_id = parse_string_from_args(query, "event_id", None, required=False)
        return await self.handler.on_room_state_request(origin, room_id, event_id)
class FederationStateIdsServlet(BaseFederationServlet):
    PATH = "/state_ids/(?P<room_id>[^/]*)/?"

    # Like /state but returns only event IDs; event_id is mandatory here.
    async def on_GET(self, origin, content, query, room_id):
        event_id = parse_string_from_args(query, "event_id", None, required=True)
        return await self.handler.on_state_ids_request(origin, room_id, event_id)
class FederationBackfillServlet(BaseFederationServlet):
    PATH = "/backfill/(?P<room_id>[^/]*)/?"

    # Return historical events before the given set of event IDs.
    async def on_GET(self, origin, content, query, room_id):
        event_ids = [x.decode("ascii") for x in query[b"v"]]
        limit = parse_integer_from_args(query, "limit", None)
        if not limit:
            return 400, {"error": "Did not include limit param"}

        return await self.handler.on_backfill_request(
            origin, room_id, event_ids, limit
        )
class FederationQueryServlet(BaseFederationServlet):
    PATH = "/query/(?P<query_type>[^/]*)"

    # Handle an incoming server-server query of the given type.
    async def on_GET(self, origin, content, query, query_type):
        decoded_args = {
            k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()
        }
        return await self.handler.on_query_request(query_type, decoded_args)
class FederationMakeJoinServlet(BaseFederationServlet):
    PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"

    async def on_GET(self, origin, _content, query, room_id, user_id):
        """Return a join-event template for the given room and user.

        Args:
            origin (unicode): The authenticated server_name of the calling server
            _content (None): (GETs don't have bodies)
            query (dict[bytes, list[bytes]]): Query params from the request.
            **kwargs (dict[unicode, unicode]): the dict mapping keys to path
                components as specified in the path match regexp.

        Returns:
            Tuple[int, object]: (response code, response object)
        """
        raw_versions = query.get(b"ver")
        if raw_versions is None:
            # Servers that predate the ?ver= param only speak room version 1.
            supported_versions = ["1"]
        else:
            supported_versions = [v.decode("utf-8") for v in raw_versions]

        content = await self.handler.on_make_join_request(
            origin, room_id, user_id, supported_versions=supported_versions
        )
        return 200, content
class FederationMakeLeaveServlet(BaseFederationServlet):
    PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"

    # Return a leave-event template for the given room and user.
    async def on_GET(self, origin, content, query, room_id, user_id):
        leave_template = await self.handler.on_make_leave_request(
            origin, room_id, user_id
        )
        return 200, leave_template
class FederationV1SendLeaveServlet(BaseFederationServlet):
    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"

    async def on_PUT(self, origin, content, query, room_id, event_id):
        result = await self.handler.on_send_leave_request(origin, content)
        # The v1 API wraps the body in a redundant (200, body) tuple.
        return 200, (200, result)
class FederationV2SendLeaveServlet(BaseFederationServlet):
    PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
    PREFIX = FEDERATION_V2_PREFIX
    async def on_PUT(self, origin, content, query, room_id, event_id):
        """Process a signed leave event (v2 API: plain, unwrapped response body)."""
        content = await self.handler.on_send_leave_request(origin, content)
        return 200, content
class FederationEventAuthServlet(BaseFederationServlet):
    PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
    async def on_GET(self, origin, content, query, room_id, event_id):
        """Return the auth chain for the given event in the given room."""
        return await self.handler.on_event_auth(origin, room_id, event_id)
class FederationV1SendJoinServlet(BaseFederationServlet):
    PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
    async def on_PUT(self, origin, content, query, room_id, event_id):
        """Process a signed join event sent by a remote server (v1 API)."""
        # TODO(paul): assert that room_id/event_id parsed from path actually
        # match those given in content
        content = await self.handler.on_send_join_request(origin, content)
        # v1 wraps the response as `(200, {...})`; v2 below returns it bare.
        return 200, (200, content)
class FederationV2SendJoinServlet(BaseFederationServlet):
    PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
    PREFIX = FEDERATION_V2_PREFIX
    async def on_PUT(self, origin, content, query, room_id, event_id):
        """Process a signed join event (v2 API: plain, unwrapped response body)."""
        # TODO(paul): assert that room_id/event_id parsed from path actually
        # match those given in content
        content = await self.handler.on_send_join_request(origin, content)
        return 200, content
class FederationV1InviteServlet(BaseFederationServlet):
    PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
    async def on_PUT(self, origin, content, query, room_id, event_id):
        """Process an invite for a local user (v1 API: no room version in body)."""
        # We don't get a room version, so we have to assume its EITHER v1 or
        # v2. This is "fine" as the only difference between V1 and V2 is the
        # state resolution algorithm, and we don't use that for processing
        # invites
        content = await self.handler.on_invite_request(
            origin, content, room_version_id=RoomVersions.V1.identifier
        )
        # V1 federation API is defined to return a content of `[200, {...}]`
        # due to a historical bug.
        return 200, (200, content)
class FederationV2InviteServlet(BaseFederationServlet):
    """Handles v2 `PUT /invite/{roomId}/{eventId}` requests.

    Unlike the v1 API, the v2 request body carries an explicit room version
    alongside the invite event itself.
    """

    PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
    PREFIX = FEDERATION_V2_PREFIX

    async def on_PUT(self, origin, content, query, room_id, event_id):
        # TODO(paul): assert that room_id/event_id parsed from path actually
        # match those given in content

        # Reject malformed bodies with a 400 rather than letting the
        # KeyError bubble up as a 500.
        if "room_version" not in content or "event" not in content:
            raise SynapseError(400, "Missing 'room_version' or 'event' in body")
        room_version = content["room_version"]
        event = content["event"]
        # `invite_room_state` is optional; default to no stripped state.
        invite_room_state = content.get("invite_room_state", [])

        # Synapse expects invite_room_state to be in unsigned, as it is in v1
        # API
        event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state

        content = await self.handler.on_invite_request(
            origin, event, room_version_id=room_version
        )
        return 200, content
class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
    PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
    async def on_PUT(self, origin, content, query, room_id):
        """Exchange a third-party invite in the request body for a real invite."""
        content = await self.handler.on_exchange_third_party_invite_request(content)
        return 200, content
class FederationClientKeysQueryServlet(BaseFederationServlet):
    PATH = "/user/keys/query"
    async def on_POST(self, origin, content, query):
        """Delegate a client-keys query for local users to the handler."""
        return await self.handler.on_query_client_keys(origin, content)
class FederationUserDevicesQueryServlet(BaseFederationServlet):
    PATH = "/user/devices/(?P<user_id>[^/]*)"
    async def on_GET(self, origin, content, query, user_id):
        """Return device information for the given local user."""
        return await self.handler.on_query_user_devices(origin, user_id)
class FederationClientKeysClaimServlet(BaseFederationServlet):
    PATH = "/user/keys/claim"
    async def on_POST(self, origin, content, query):
        """Delegate a client key claim request to the handler."""
        response = await self.handler.on_claim_client_keys(origin, content)
        return 200, response
class FederationGetMissingEventsServlet(BaseFederationServlet):
    """Returns events on the paths between `earliest_events` and `latest_events`."""

    # TODO(paul): Why does this path alone end with "/?" optional?
    PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"

    async def on_POST(self, origin, content, query, room_id):
        # All body parameters are optional; fall back to sensible defaults.
        limit = int(content.get("limit", 10))
        earliest = content.get("earliest_events", [])
        latest = content.get("latest_events", [])

        result = await self.handler.on_get_missing_events(
            origin,
            room_id=room_id,
            earliest_events=earliest,
            latest_events=latest,
            limit=limit,
        )
        return 200, result
class On3pidBindServlet(BaseFederationServlet):
    """Receives notification that a third-party identifier has been bound.

    NOTE(review): unauthenticated (REQUIRE_AUTH = False) — presumably because
    the sender is an identity server rather than a homeserver; confirm.
    """
    PATH = "/3pid/onbind"
    REQUIRE_AUTH = False
    async def on_POST(self, origin, content, query):
        if "invites" in content:
            last_exception = None
            # Process every invite even if some fail, so one bad entry does
            # not block the rest; the last failure is re-raised afterwards.
            for invite in content["invites"]:
                try:
                    if "signed" not in invite or "token" not in invite["signed"]:
                        message = (
                            "Rejecting received notification of third-"
                            "party invite without signed: %s" % (invite,)
                        )
                        logger.info(message)
                        raise SynapseError(400, message)
                    await self.handler.exchange_third_party_invite(
                        invite["sender"],
                        invite["mxid"],
                        invite["room_id"],
                        invite["signed"],
                    )
                except Exception as e:
                    last_exception = e
            if last_exception:
                raise last_exception
        return 200, {}
class OpenIdUserInfo(BaseFederationServlet):
    """
    Exchange a bearer token for information about a user.
    The response format should be compatible with:
    http://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse
    GET /openid/userinfo?access_token=ABDEFGH HTTP/1.1
    HTTP/1.1 200 OK
    Content-Type: application/json
    {
        "sub": "@userpart:example.org",
    }
    """

    PATH = "/openid/userinfo"

    # Token-based, not signature-based, so skip federation auth.
    REQUIRE_AUTH = False

    async def on_GET(self, origin, content, query):
        token = query.get(b"access_token", [None])[0]
        if token is None:
            error = {"errcode": "M_MISSING_TOKEN", "error": "Access Token required"}
            return 401, error

        user_id = await self.handler.on_openid_userinfo(token.decode("ascii"))
        if user_id is None:
            error = {
                "errcode": "M_UNKNOWN_TOKEN",
                "error": "Access Token unknown or expired",
            }
            return 401, error

        return 200, {"sub": user_id}
class PublicRoomList(BaseFederationServlet):
    """
    Fetch the public room list for this server.
    This API returns information in the same format as /publicRooms on the
    client API, but will only ever include local public rooms and hence is
    intended for consumption by other homeservers.
    GET /publicRooms HTTP/1.1
    HTTP/1.1 200 OK
    Content-Type: application/json
    {
        "chunk": [
            {
                "aliases": [
                    "#test:localhost"
                ],
                "guest_can_join": false,
                "name": "test room",
                "num_joined_members": 3,
                "room_id": "!whkydVegtvatLfXmPN:localhost",
                "world_readable": false
            }
        ],
        "end": "END",
        "start": "START"
    }
    """

    PATH = "/publicRooms"

    def __init__(self, handler, authenticator, ratelimiter, server_name, allow_access):
        super().__init__(handler, authenticator, ratelimiter, server_name)
        # Config flag: whether remote servers may read our public room list.
        self.allow_access = allow_access

    async def on_GET(self, origin, content, query):
        if not self.allow_access:
            raise FederationDeniedError(origin)

        limit = parse_integer_from_args(query, "limit", 0)
        since_token = parse_string_from_args(query, "since", None)
        include_all_networks = parse_boolean_from_args(
            query, "include_all_networks", False
        )
        third_party_instance_id = parse_string_from_args(
            query, "third_party_instance_id", None
        )

        if include_all_networks:
            network_tuple = None
        elif third_party_instance_id:
            network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
        else:
            network_tuple = ThirdPartyInstanceID(None, None)

        if limit == 0:
            # zero is a special value which corresponds to no limit.
            limit = None

        response = await self.handler.get_local_public_room_list(
            limit, since_token, network_tuple=network_tuple, from_federation=True
        )
        return 200, response

    async def on_POST(self, origin, content, query):
        # This implements MSC2197 (Search Filtering over Federation)
        if not self.allow_access:
            raise FederationDeniedError(origin)

        limit = int(content.get("limit", 100))  # type: Optional[int]
        since_token = content.get("since", None)
        search_filter = content.get("filter", None)
        include_all_networks = content.get("include_all_networks", False)
        third_party_instance_id = content.get("third_party_instance_id", None)

        if include_all_networks:
            # "all networks" and an explicit network are mutually exclusive.
            if third_party_instance_id is not None:
                raise SynapseError(
                    400, "Can't use include_all_networks with an explicit network"
                )
            network_tuple = None
        elif third_party_instance_id is None:
            network_tuple = ThirdPartyInstanceID(None, None)
        else:
            network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)

        if search_filter is None:
            logger.warning("Nonefilter")

        if limit == 0:
            # zero is a special value which corresponds to no limit.
            limit = None

        response = await self.handler.get_local_public_room_list(
            limit=limit,
            since_token=since_token,
            search_filter=search_filter,
            network_tuple=network_tuple,
            from_federation=True,
        )
        return 200, response
class FederationVersionServlet(BaseFederationServlet):
    """Reports this server's implementation name and version (unauthenticated)."""
    PATH = "/version"
    REQUIRE_AUTH = False
    async def on_GET(self, origin, content, query):
        return (
            200,
            {"server": {"name": "Synapse", "version": get_version_string(synapse)}},
        )
class FederationGroupsProfileServlet(BaseFederationServlet):
    """Get/set the basic profile of a group on behalf of a user."""

    PATH = "/groups/(?P<group_id>[^/]*)/profile"

    async def on_GET(self, origin, content, query, group_id):
        # Only the requester's own homeserver may act on their behalf.
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        profile = await self.handler.get_group_profile(group_id, requester)
        return 200, profile

    async def on_POST(self, origin, content, query, group_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        updated = await self.handler.update_group_profile(
            group_id, requester, content
        )
        return 200, updated
class FederationGroupsSummaryServlet(BaseFederationServlet):
    """Get the summary of a group on behalf of a user."""
    PATH = "/groups/(?P<group_id>[^/]*)/summary"
    async def on_GET(self, origin, content, query, group_id):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        new_content = await self.handler.get_group_summary(group_id, requester_user_id)
        return 200, new_content
class FederationGroupsRoomsServlet(BaseFederationServlet):
    """Get the rooms in a group on behalf of a user."""
    PATH = "/groups/(?P<group_id>[^/]*)/rooms"
    async def on_GET(self, origin, content, query, group_id):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        new_content = await self.handler.get_rooms_in_group(group_id, requester_user_id)
        return 200, new_content
class FederationGroupsAddRoomsServlet(BaseFederationServlet):
    """Add/remove a room from a group on behalf of a user."""
    PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
    async def on_POST(self, origin, content, query, group_id, room_id):
        """Add the room to the group."""
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        new_content = await self.handler.add_room_to_group(
            group_id, requester_user_id, room_id, content
        )
        return 200, new_content
    async def on_DELETE(self, origin, content, query, group_id, room_id):
        """Remove the room from the group."""
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        new_content = await self.handler.remove_room_from_group(
            group_id, requester_user_id, room_id
        )
        return 200, new_content
class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet):
    """Update the config of a room in a group on behalf of a user."""
    PATH = (
        "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
        "/config/(?P<config_key>[^/]*)"
    )
    async def on_POST(self, origin, content, query, group_id, room_id, config_key):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        result = await self.handler.update_room_in_group(
            group_id, requester_user_id, room_id, config_key, content
        )
        return 200, result
class FederationGroupsUsersServlet(BaseFederationServlet):
    """Get the users in a group on behalf of a user."""
    PATH = "/groups/(?P<group_id>[^/]*)/users"
    async def on_GET(self, origin, content, query, group_id):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        new_content = await self.handler.get_users_in_group(group_id, requester_user_id)
        return 200, new_content
class FederationGroupsInvitedUsersServlet(BaseFederationServlet):
    """Get the users that have been invited to a group."""
    PATH = "/groups/(?P<group_id>[^/]*)/invited_users"
    async def on_GET(self, origin, content, query, group_id):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        new_content = await self.handler.get_invited_users_in_group(
            group_id, requester_user_id
        )
        return 200, new_content
class FederationGroupsInviteServlet(BaseFederationServlet):
    """Ask a group server to invite someone to the group."""
    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
    async def on_POST(self, origin, content, query, group_id, user_id):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        new_content = await self.handler.invite_to_group(
            group_id, user_id, requester_user_id, content
        )
        return 200, new_content
class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
    """Accept an invitation from the group server."""
    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"
    async def on_POST(self, origin, content, query, group_id, user_id):
        # The accepting user's homeserver must be the one making the request.
        if get_domain_from_id(user_id) != origin:
            raise SynapseError(403, "user_id doesn't match origin")
        new_content = await self.handler.accept_invite(group_id, user_id, content)
        return 200, new_content
class FederationGroupsJoinServlet(BaseFederationServlet):
    """Attempt to join a group."""
    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"
    async def on_POST(self, origin, content, query, group_id, user_id):
        # The joining user's homeserver must be the one making the request.
        if get_domain_from_id(user_id) != origin:
            raise SynapseError(403, "user_id doesn't match origin")
        new_content = await self.handler.join_group(group_id, user_id, content)
        return 200, new_content
class FederationGroupsRemoveUserServlet(BaseFederationServlet):
    """Leave or kick a user from the group."""
    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
    async def on_POST(self, origin, content, query, group_id, user_id):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        new_content = await self.handler.remove_user_from_group(
            group_id, user_id, requester_user_id, content
        )
        return 200, new_content
class FederationGroupsLocalInviteServlet(BaseFederationServlet):
    """A group server has invited a local user."""
    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
    async def on_POST(self, origin, content, query, group_id, user_id):
        # Only the server that owns the group may notify us of invites.
        if get_domain_from_id(group_id) != origin:
            raise SynapseError(403, "group_id doesn't match origin")
        new_content = await self.handler.on_invite(group_id, user_id, content)
        return 200, new_content
class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet):
    """A group server has removed a local user."""

    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"

    async def on_POST(self, origin, content, query, group_id, user_id):
        # Only the server that owns the group may notify us of removals.
        if get_domain_from_id(group_id) != origin:
            # Bug fix: the message previously said "user_id doesn't match
            # origin", but the check is on group_id (cf. the sibling
            # FederationGroupsLocalInviteServlet).
            raise SynapseError(403, "group_id doesn't match origin")

        new_content = await self.handler.user_removed_from_group(
            group_id, user_id, content
        )
        return 200, new_content
class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
    """A group or user's server renews their attestation.

    NOTE(review): the class name misspells "Attestation"; it is kept as-is
    because it is referenced from GROUP_ATTESTATION_SERVLET_CLASSES.
    """
    PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"
    async def on_POST(self, origin, content, query, group_id, user_id):
        # We don't need to check auth here as we check the attestation signatures
        new_content = await self.handler.on_renew_attestation(
            group_id, user_id, content
        )
        return 200, new_content
class FederationGroupsSummaryRoomsServlet(BaseFederationServlet):
    """Add/remove a room from the group summary, with optional category.
    Matches both:
        - /groups/:group/summary/rooms/:room_id
        - /groups/:group/summary/categories/:category/rooms/:room_id
    """
    PATH = (
        "/groups/(?P<group_id>[^/]*)/summary"
        "(/categories/(?P<category_id>[^/]+))?"
        "/rooms/(?P<room_id>[^/]*)"
    )
    async def on_POST(self, origin, content, query, group_id, category_id, room_id):
        """Add/update the room in the group summary."""
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        # category_id is None when the category-less form of the path matched.
        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")
        resp = await self.handler.update_group_summary_room(
            group_id,
            requester_user_id,
            room_id=room_id,
            category_id=category_id,
            content=content,
        )
        return 200, resp
    async def on_DELETE(self, origin, content, query, group_id, category_id, room_id):
        """Remove the room from the group summary."""
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")
        resp = await self.handler.delete_group_summary_room(
            group_id, requester_user_id, room_id=room_id, category_id=category_id
        )
        return 200, resp
class FederationGroupsCategoriesServlet(BaseFederationServlet):
    """Get all categories for a group."""
    PATH = "/groups/(?P<group_id>[^/]*)/categories/?"
    async def on_GET(self, origin, content, query, group_id):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        resp = await self.handler.get_group_categories(group_id, requester_user_id)
        return 200, resp
class FederationGroupsCategoryServlet(BaseFederationServlet):
    """Add/remove/get a single category in a group."""

    PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"

    async def on_GET(self, origin, content, query, group_id, category_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")

        result = await self.handler.get_group_category(
            group_id, requester, category_id
        )
        return 200, result

    async def on_POST(self, origin, content, query, group_id, category_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")

        result = await self.handler.upsert_group_category(
            group_id, requester, category_id, content
        )
        return 200, result

    async def on_DELETE(self, origin, content, query, group_id, category_id):
        requester = parse_string_from_args(query, "requester_user_id")
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")

        result = await self.handler.delete_group_category(
            group_id, requester, category_id
        )
        return 200, result
class FederationGroupsRolesServlet(BaseFederationServlet):
    """Get all roles in a group."""
    PATH = "/groups/(?P<group_id>[^/]*)/roles/?"
    async def on_GET(self, origin, content, query, group_id):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        resp = await self.handler.get_group_roles(group_id, requester_user_id)
        return 200, resp
class FederationGroupsRoleServlet(BaseFederationServlet):
    """Add/remove/get a single role in a group."""
    PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"
    async def on_GET(self, origin, content, query, group_id, role_id):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        resp = await self.handler.get_group_role(group_id, requester_user_id, role_id)
        return 200, resp
    async def on_POST(self, origin, content, query, group_id, role_id):
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")
        resp = await self.handler.update_group_role(
            group_id, requester_user_id, role_id, content
        )
        return 200, resp
    async def on_DELETE(self, origin, content, query, group_id, role_id):
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")
        resp = await self.handler.delete_group_role(
            group_id, requester_user_id, role_id
        )
        return 200, resp
class FederationGroupsSummaryUsersServlet(BaseFederationServlet):
    """Add/remove a user from the group summary, with optional role.
    Matches both:
        - /groups/:group/summary/users/:user_id
        - /groups/:group/summary/roles/:role/users/:user_id
    """
    PATH = (
        "/groups/(?P<group_id>[^/]*)/summary"
        "(/roles/(?P<role_id>[^/]+))?"
        "/users/(?P<user_id>[^/]*)"
    )
    async def on_POST(self, origin, content, query, group_id, role_id, user_id):
        """Add/update the user in the group summary."""
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        # role_id is None when the role-less form of the path matched.
        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")
        resp = await self.handler.update_group_summary_user(
            group_id,
            requester_user_id,
            user_id=user_id,
            role_id=role_id,
            content=content,
        )
        return 200, resp
    async def on_DELETE(self, origin, content, query, group_id, role_id, user_id):
        """Remove the user from the group summary."""
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        if role_id == "":
            raise SynapseError(400, "role_id cannot be empty string")
        resp = await self.handler.delete_group_summary_user(
            group_id, requester_user_id, user_id=user_id, role_id=role_id
        )
        return 200, resp
class FederationGroupsBulkPublicisedServlet(BaseFederationServlet):
    """Get the groups the given users have publicised membership of."""
    PATH = "/get_groups_publicised"
    async def on_POST(self, origin, content, query):
        resp = await self.handler.bulk_get_publicised_groups(
            content["user_ids"], proxy=False
        )
        return 200, resp
class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
    """Sets whether a group is joinable without an invite or knock."""
    PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"
    async def on_PUT(self, origin, content, query, group_id):
        # Only the requester's own homeserver may act on their behalf.
        requester_user_id = parse_string_from_args(query, "requester_user_id")
        if get_domain_from_id(requester_user_id) != origin:
            raise SynapseError(403, "requester_user_id doesn't match origin")
        new_content = await self.handler.set_group_join_policy(
            group_id, requester_user_id, content
        )
        return 200, new_content
class RoomComplexityServlet(BaseFederationServlet):
    """
    Indicates to other servers how complex (and therefore likely
    resource-intensive) a public room this server knows about is.
    """
    PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
    PREFIX = FEDERATION_UNSTABLE_PREFIX
    async def on_GET(self, origin, content, query, room_id):
        store = self.handler.hs.get_datastore()
        # Don't reveal complexity (or existence) of rooms that aren't
        # world-readable or publicly joinable.
        is_public = await store.is_room_world_readable_or_publicly_joinable(room_id)
        if not is_public:
            raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
        complexity = await store.get_room_complexity(room_id)
        return 200, complexity
# Servlet classes grouped by the handler they are registered against.
# `FederationEventServlet` was previously listed twice; the duplicate entry
# has been removed (registering it twice was redundant).
FEDERATION_SERVLET_CLASSES = (
    FederationSendServlet,
    FederationEventServlet,
    FederationStateV1Servlet,
    FederationStateIdsServlet,
    FederationBackfillServlet,
    FederationQueryServlet,
    FederationMakeJoinServlet,
    FederationMakeLeaveServlet,
    FederationV1SendJoinServlet,
    FederationV2SendJoinServlet,
    FederationV1SendLeaveServlet,
    FederationV2SendLeaveServlet,
    FederationV1InviteServlet,
    FederationV2InviteServlet,
    FederationGetMissingEventsServlet,
    FederationEventAuthServlet,
    FederationClientKeysQueryServlet,
    FederationUserDevicesQueryServlet,
    FederationClientKeysClaimServlet,
    FederationThirdPartyInviteExchangeServlet,
    On3pidBindServlet,
    FederationVersionServlet,
    RoomComplexityServlet,
)  # type: Tuple[Type[BaseFederationServlet], ...]
OPENID_SERVLET_CLASSES = (
    OpenIdUserInfo,
)  # type: Tuple[Type[BaseFederationServlet], ...]
ROOM_LIST_CLASSES = (PublicRoomList,)  # type: Tuple[Type[PublicRoomList], ...]
GROUP_SERVER_SERVLET_CLASSES = (
    FederationGroupsProfileServlet,
    FederationGroupsSummaryServlet,
    FederationGroupsRoomsServlet,
    FederationGroupsUsersServlet,
    FederationGroupsInvitedUsersServlet,
    FederationGroupsInviteServlet,
    FederationGroupsAcceptInviteServlet,
    FederationGroupsJoinServlet,
    FederationGroupsRemoveUserServlet,
    FederationGroupsSummaryRoomsServlet,
    FederationGroupsCategoriesServlet,
    FederationGroupsCategoryServlet,
    FederationGroupsRolesServlet,
    FederationGroupsRoleServlet,
    FederationGroupsSummaryUsersServlet,
    FederationGroupsAddRoomsServlet,
    FederationGroupsAddRoomsConfigServlet,
    FederationGroupsSettingJoinPolicyServlet,
)  # type: Tuple[Type[BaseFederationServlet], ...]
GROUP_LOCAL_SERVLET_CLASSES = (
    FederationGroupsLocalInviteServlet,
    FederationGroupsRemoveLocalUserServlet,
    FederationGroupsBulkPublicisedServlet,
)  # type: Tuple[Type[BaseFederationServlet], ...]
GROUP_ATTESTATION_SERVLET_CLASSES = (
    FederationGroupsRenewAttestaionServlet,
)  # type: Tuple[Type[BaseFederationServlet], ...]
# Names accepted by register_servlets(servlet_groups=...); all are
# registered when no explicit list is given.
DEFAULT_SERVLET_GROUPS = (
    "federation",
    "room_list",
    "group_server",
    "group_local",
    "group_attestation",
    "openid",
)
def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=None):
    """Initialize and register servlet classes.
    Will by default register all servlets. For custom behaviour, pass in
    a list of servlet_groups to register.
    Args:
        hs (synapse.server.HomeServer): homeserver
        resource (TransportLayerServer): resource class to register to
        authenticator (Authenticator): authenticator to use
        ratelimiter (util.ratelimitutils.FederationRateLimiter): ratelimiter to use
        servlet_groups (list[str], optional): List of servlet groups to register.
            Defaults to ``DEFAULT_SERVLET_GROUPS``.
    """
    # An empty or None servlet_groups means "register everything".
    if not servlet_groups:
        servlet_groups = DEFAULT_SERVLET_GROUPS
    if "federation" in servlet_groups:
        for servletclass in FEDERATION_SERVLET_CLASSES:
            servletclass(
                handler=hs.get_federation_server(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)
    if "openid" in servlet_groups:
        for servletclass in OPENID_SERVLET_CLASSES:
            servletclass(
                handler=hs.get_federation_server(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)
    if "room_list" in servlet_groups:
        for servletclass in ROOM_LIST_CLASSES:
            servletclass(
                handler=hs.get_room_list_handler(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
                # PublicRoomList additionally takes the config flag gating
                # remote access to the public room list.
                allow_access=hs.config.allow_public_rooms_over_federation,
            ).register(resource)
    if "group_server" in servlet_groups:
        for servletclass in GROUP_SERVER_SERVLET_CLASSES:
            servletclass(
                handler=hs.get_groups_server_handler(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)
    if "group_local" in servlet_groups:
        for servletclass in GROUP_LOCAL_SERVLET_CLASSES:
            servletclass(
                handler=hs.get_groups_local_handler(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)
    if "group_attestation" in servlet_groups:
        for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES:
            servletclass(
                handler=hs.get_groups_attestation_renewer(),
                authenticator=authenticator,
                ratelimiter=ratelimiter,
                server_name=hs.hostname,
            ).register(resource)
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_4373_2 |
crossvul-python_data_good_1916_3 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import logging
import random
import sys
import urllib.parse
from io import BytesIO
from typing import Callable, Dict, List, Optional, Tuple, Union
import attr
import treq
from canonicaljson import encode_canonical_json
from prometheus_client import Counter
from signedjson.sign import sign_json
from twisted.internet import defer
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IReactorTime
from twisted.internet.task import _EPSILON, Cooperator
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer, IResponse
import synapse.metrics
import synapse.util.retryutils
from synapse.api.errors import (
Codes,
FederationDeniedError,
HttpResponseException,
RequestSendFailed,
SynapseError,
)
from synapse.http import QuieterFileBodyProducer
from synapse.http.client import (
BlacklistingAgentWrapper,
BlacklistingReactorWrapper,
BodyExceededMaxSize,
encode_query_args,
read_body_with_max_size,
)
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import (
inject_active_span_byte_dict,
set_tag,
start_active_span,
tags,
)
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
# Prometheus metrics for outbound federation HTTP traffic.
outgoing_requests_counter = Counter(
    "synapse_http_matrixfederationclient_requests", "", ["method"]
)
incoming_responses_counter = Counter(
    "synapse_http_matrixfederationclient_responses", "", ["method", "code"]
)
# Retry budgets for outgoing requests — presumably consumed by the request
# loop later in this module (not visible here); confirm before changing.
MAX_LONG_RETRIES = 10
MAX_SHORT_RETRIES = 3
MAXINT = sys.maxsize
# Monotonically increasing counter used to build per-request txn ids.
_next_id = 1
# Shape of the `query` argument accepted by request helpers.
QueryArgs = Dict[str, Union[str, List[str]]]
@attr.s(slots=True, frozen=True)
class MatrixFederationRequest:
    """Describes an outgoing HTTP request to a remote homeserver.

    Instances are immutable; the transaction id and full URI are derived
    once, in ``__attrs_post_init__``.
    """

    method = attr.ib(type=str)
    """HTTP method
    """
    path = attr.ib(type=str)
    """HTTP path
    """
    destination = attr.ib(type=str)
    """The remote server to send the HTTP request to.
    """
    json = attr.ib(default=None, type=Optional[JsonDict])
    """JSON to send in the body.
    """
    json_callback = attr.ib(default=None, type=Optional[Callable[[], JsonDict]])
    """A callback to generate the JSON.
    """
    query = attr.ib(default=None, type=Optional[dict])
    """Query arguments.
    """
    txn_id = attr.ib(default=None, type=Optional[str])
    """Unique ID for this request (for logging)
    """
    uri = attr.ib(init=False, type=bytes)
    """The URI of this request
    """
    def __attrs_post_init__(self) -> None:
        global _next_id
        # Label the request for logging, e.g. "GET-O-123", using the
        # module-level counter (wraps before reaching MAXINT).
        txn_id = "%s-O-%s" % (self.method, _next_id)
        _next_id = (_next_id + 1) % (MAXINT - 1)
        # The class is frozen, so attrs blocks normal assignment; use
        # object.__setattr__ to fill in the derived fields.
        object.__setattr__(self, "txn_id", txn_id)
        destination_bytes = self.destination.encode("ascii")
        path_bytes = self.path.encode("ascii")
        if self.query:
            query_bytes = encode_query_args(self.query)
        else:
            query_bytes = b""
        # The object is frozen so we can pre-compute this.
        uri = urllib.parse.urlunparse(
            (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
        )
        object.__setattr__(self, "uri", uri)
    def get_json(self) -> Optional[JsonDict]:
        """Return the JSON body, preferring the callback if one was supplied."""
        if self.json_callback:
            return self.json_callback()
        return self.json
async def _handle_json_response(
    reactor: IReactorTime,
    timeout_sec: float,
    request: MatrixFederationRequest,
    response: IResponse,
    start_ms: int,
) -> JsonDict:
    """
    Reads the JSON body of a response, with a timeout and a size limit.

    Args:
        reactor: twisted reactor, for the timeout
        timeout_sec: number of seconds to wait for response to complete
        request: the request that triggered the response
        response: response to the request
        start_ms: Timestamp when request was made (milliseconds)

    Returns:
        The parsed JSON response

    Raises:
        RequestSendFailed: if the body could not be read within the timeout,
            or exceeded the maximum allowed size.
    """
    # Cap how much of the response body we are prepared to buffer. Reading an
    # unbounded body (the previous treq.text_content behaviour) lets a
    # malicious or buggy remote server exhaust our memory.
    max_response_size = 1024 * 1024

    try:
        check_content_type_is_json(response.headers)

        # Stream the body into a bounded buffer, then decode with the custom
        # JSON decoder.
        buf = BytesIO()
        d = read_body_with_max_size(response, buf, max_response_size)
        d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor)
        await make_deferred_yieldable(d)
        body = json_decoder.decode(buf.getvalue().decode("utf-8"))
    except BodyExceededMaxSize as e:
        # The response was too big: don't retry, the remote would just send
        # it again.
        logger.warning(
            "{%s} [%s] JSON response exceeded max size %i - %s %s",
            request.txn_id,
            request.destination,
            max_response_size,
            request.method,
            request.uri.decode("ascii"),
        )
        raise RequestSendFailed(e, can_retry=False) from e
    except defer.TimeoutError as e:
        logger.warning(
            "{%s} [%s] Timed out reading response - %s %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
        )
        raise RequestSendFailed(e, can_retry=True) from e
    except Exception as e:
        logger.warning(
            "{%s} [%s] Error reading response %s %s: %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
            e,
        )
        raise

    # start_ms is in milliseconds; reactor.seconds() is in seconds.
    time_taken_secs = reactor.seconds() - start_ms / 1000

    logger.info(
        "{%s} [%s] Completed request: %d %s in %.2f secs - %s %s",
        request.txn_id,
        request.destination,
        response.code,
        response.phrase.decode("ascii", errors="replace"),
        time_taken_secs,
        request.method,
        request.uri.decode("ascii"),
    )
    return body
class MatrixFederationHttpClient:
    """HTTP client used to talk to other homeservers over the federation
    protocol. Send client certificates and signs requests.

    Attributes:
        agent (twisted.web.client.Agent): The twisted Agent used to send the
            requests.
    """

    def __init__(self, hs, tls_client_options_factory):
        self.hs = hs
        self.signing_key = hs.signing_key
        self.server_name = hs.hostname

        # We need to use a DNS resolver which filters out blacklisted IP
        # addresses, to prevent DNS rebinding.
        self.reactor = BlacklistingReactorWrapper(
            hs.get_reactor(), None, hs.config.federation_ip_range_blacklist
        )

        user_agent = hs.version_string
        if hs.config.user_agent_suffix:
            user_agent = "%s %s" % (user_agent, hs.config.user_agent_suffix)
        user_agent = user_agent.encode("ascii")

        self.agent = MatrixFederationAgent(
            self.reactor,
            tls_client_options_factory,
            user_agent,
            hs.config.federation_ip_range_blacklist,
        )

        # Use a BlacklistingAgentWrapper to prevent circumventing the IP
        # blacklist via IP literals in server names
        self.agent = BlacklistingAgentWrapper(
            self.agent, ip_blacklist=hs.config.federation_ip_range_blacklist,
        )

        self.clock = hs.get_clock()
        self._store = hs.get_datastore()
        self.version_string_bytes = hs.version_string.encode("ascii")
        # Default per-attempt timeout, in seconds.
        self.default_timeout = 60

        # Schedule Cooperator work via the (blacklisting-wrapped) reactor
        # rather than the global twisted reactor.
        def schedule(x):
            self.reactor.callLater(_EPSILON, x)

        self._cooperator = Cooperator(scheduler=schedule)
    async def _send_request_with_optional_trailing_slash(
        self,
        request: MatrixFederationRequest,
        try_trailing_slash_on_400: bool = False,
        **send_request_args
    ) -> IResponse:
        """Wrapper for _send_request which can optionally retry the request
        upon receiving a combination of a 400 HTTP response code and a
        'M_UNRECOGNIZED' errcode. This is a workaround for Synapse <= v0.99.3
        due to #3622.

        Args:
            request: details of request to be sent

            try_trailing_slash_on_400: Whether on receiving a 400
                'M_UNRECOGNIZED' from the server to retry the request with a
                trailing slash appended to the request path.

            send_request_args: A dictionary of arguments to pass to
                `_send_request()`.

        Raises:
            HttpResponseException: If we get an HTTP response code >= 300
                (except 429).

        Returns:
            Parsed JSON response body.
        """
        try:
            response = await self._send_request(request, **send_request_args)
        except HttpResponseException as e:
            # Received an HTTP error > 300. Check if it meets the requirements
            # to retry with a trailing slash
            if not try_trailing_slash_on_400:
                raise

            if e.code != 400 or e.to_synapse_error().errcode != "M_UNRECOGNIZED":
                raise

            # Retry with a trailing slash if we received a 400 with
            # 'M_UNRECOGNIZED' which some endpoints can return when omitting a
            # trailing slash on Synapse <= v0.99.3.
            logger.info("Retrying request with trailing slash")

            # Request is frozen so we create a new instance
            request = attr.evolve(request, path=request.path + "/")

            response = await self._send_request(request, **send_request_args)

        return response
    async def _send_request(
        self,
        request: MatrixFederationRequest,
        retry_on_dns_fail: bool = True,
        timeout: Optional[int] = None,
        long_retries: bool = False,
        ignore_backoff: bool = False,
        backoff_on_404: bool = False,
    ) -> IResponse:
        """
        Sends a request to the given server.

        Args:
            request: details of request to be sent

            retry_on_dns_fail: true if the request should be retied on DNS failures

            timeout: number of milliseconds to wait for the response headers
                (including connecting to the server), *for each attempt*.
                60s by default.

            long_retries: whether to use the long retry algorithm.

                The regular retry algorithm makes 4 attempts, with intervals
                [0.5s, 1s, 2s].

                The long retry algorithm makes 11 attempts, with intervals
                [4s, 16s, 60s, 60s, ...]

                Both algorithms add -20%/+40% jitter to the retry intervals.

                Note that the above intervals are *in addition* to the time spent
                waiting for the request to complete (up to `timeout` ms).

                NB: the long retry algorithm takes over 20 minutes to complete, with
                a default timeout of 60s!

            ignore_backoff: true to ignore the historical backoff data
                and try the request anyway.

            backoff_on_404: Back off if we get a 404

        Returns:
            Resolves with the HTTP response object on success.

        Raises:
            HttpResponseException: If we get an HTTP response code >= 300
                (except 429).
            NotRetryingDestination: If we are not yet ready to retry this
                server.
            FederationDeniedError: If this destination is not on our
                federation whitelist
            RequestSendFailed: If there were problems connecting to the
                remote, due to e.g. DNS failures, connection timeouts etc.
        """
        # Convert the per-attempt timeout from milliseconds to seconds.
        if timeout:
            _sec_timeout = timeout / 1000
        else:
            _sec_timeout = self.default_timeout

        # Enforce the federation whitelist, if one is configured.
        if (
            self.hs.config.federation_domain_whitelist is not None
            and request.destination not in self.hs.config.federation_domain_whitelist
        ):
            raise FederationDeniedError(request.destination)

        # Raises NotRetryingDestination if we are still backing off from this
        # destination; used as a context manager below so success/failure is
        # recorded against the destination.
        limiter = await synapse.util.retryutils.get_retry_limiter(
            request.destination,
            self.clock,
            self._store,
            backoff_on_404=backoff_on_404,
            ignore_backoff=ignore_backoff,
        )

        method_bytes = request.method.encode("ascii")
        destination_bytes = request.destination.encode("ascii")
        path_bytes = request.path.encode("ascii")
        if request.query:
            query_bytes = encode_query_args(request.query)
        else:
            query_bytes = b""

        # Opentracing span covering the whole (possibly retried) request.
        scope = start_active_span(
            "outgoing-federation-request",
            tags={
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
                tags.PEER_ADDRESS: request.destination,
                tags.HTTP_METHOD: request.method,
                tags.HTTP_URL: request.path,
            },
            finish_on_close=True,
        )

        # Inject the span into the headers
        headers_dict = {}  # type: Dict[bytes, List[bytes]]
        inject_active_span_byte_dict(headers_dict, request.destination)

        headers_dict[b"User-Agent"] = [self.version_string_bytes]

        with limiter, scope:
            # XXX: Would be much nicer to retry only at the transaction-layer
            # (once we have reliable transactions in place)
            if long_retries:
                retries_left = MAX_LONG_RETRIES
            else:
                retries_left = MAX_SHORT_RETRIES

            url_bytes = request.uri
            url_str = url_bytes.decode("ascii")

            # The request is signed over the path+query only (no scheme/host).
            url_to_sign_bytes = urllib.parse.urlunparse(
                (b"", b"", path_bytes, None, query_bytes, b"")
            )

            while True:
                try:
                    # Re-fetch the JSON on each attempt: json_callback may
                    # produce fresh content (and hence fresh signatures).
                    json = request.get_json()
                    if json:
                        headers_dict[b"Content-Type"] = [b"application/json"]
                        auth_headers = self.build_auth_headers(
                            destination_bytes, method_bytes, url_to_sign_bytes, json
                        )
                        data = encode_canonical_json(json)
                        producer = QuieterFileBodyProducer(
                            BytesIO(data), cooperator=self._cooperator
                        )  # type: Optional[IBodyProducer]
                    else:
                        producer = None
                        auth_headers = self.build_auth_headers(
                            destination_bytes, method_bytes, url_to_sign_bytes
                        )

                    headers_dict[b"Authorization"] = auth_headers

                    logger.debug(
                        "{%s} [%s] Sending request: %s %s; timeout %fs",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _sec_timeout,
                    )

                    outgoing_requests_counter.labels(request.method).inc()

                    try:
                        with Measure(self.clock, "outbound_request"):
                            # we don't want all the fancy cookie and redirect handling
                            # that treq.request gives: just use the raw Agent.
                            request_deferred = self.agent.request(
                                method_bytes,
                                url_bytes,
                                headers=Headers(headers_dict),
                                bodyProducer=producer,
                            )
                            request_deferred = timeout_deferred(
                                request_deferred,
                                timeout=_sec_timeout,
                                reactor=self.reactor,
                            )

                            response = await request_deferred
                    except DNSLookupError as e:
                        raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e
                    except Exception as e:
                        # Any other connection-level failure is considered
                        # retryable.
                        raise RequestSendFailed(e, can_retry=True) from e

                    incoming_responses_counter.labels(
                        request.method, response.code
                    ).inc()

                    set_tag(tags.HTTP_STATUS_CODE, response.code)
                    response_phrase = response.phrase.decode("ascii", errors="replace")

                    if 200 <= response.code < 300:
                        logger.debug(
                            "{%s} [%s] Got response headers: %d %s",
                            request.txn_id,
                            request.destination,
                            response.code,
                            response_phrase,
                        )
                        pass
                    else:
                        logger.info(
                            "{%s} [%s] Got response headers: %d %s",
                            request.txn_id,
                            request.destination,
                            response.code,
                            response_phrase,
                        )
                        # :'(
                        # Update transactions table?
                        # Read the error body (best-effort) so we can include
                        # it in the raised exception.
                        d = treq.content(response)
                        d = timeout_deferred(
                            d, timeout=_sec_timeout, reactor=self.reactor
                        )

                        try:
                            body = await make_deferred_yieldable(d)
                        except Exception as e:
                            # Eh, we're already going to raise an exception so lets
                            # ignore if this fails.
                            logger.warning(
                                "{%s} [%s] Failed to get error response: %s %s: %s",
                                request.txn_id,
                                request.destination,
                                request.method,
                                url_str,
                                _flatten_response_never_received(e),
                            )
                            body = None

                        exc = HttpResponseException(
                            response.code, response_phrase, body
                        )

                        # Retry if the error is a 429 (Too Many Requests),
                        # otherwise just raise a standard HttpResponseException
                        if response.code == 429:
                            raise RequestSendFailed(exc, can_retry=True) from exc
                        else:
                            raise exc

                    break
                except RequestSendFailed as e:
                    logger.info(
                        "{%s} [%s] Request failed: %s %s: %s",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _flatten_response_never_received(e.inner_exception),
                    )

                    if not e.can_retry:
                        raise

                    # Only retry when attempts remain and no explicit timeout
                    # was requested by the caller.
                    if retries_left and not timeout:
                        if long_retries:
                            delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        logger.debug(
                            "{%s} [%s] Waiting %ss before re-sending...",
                            request.txn_id,
                            request.destination,
                            delay,
                        )

                        await self.clock.sleep(delay)
                        retries_left -= 1
                    else:
                        raise

                except Exception as e:
                    logger.warning(
                        "{%s} [%s] Request failed: %s %s: %s",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _flatten_response_never_received(e),
                    )
                    raise

        return response
def build_auth_headers(
self,
destination: Optional[bytes],
method: bytes,
url_bytes: bytes,
content: Optional[JsonDict] = None,
destination_is: Optional[bytes] = None,
) -> List[bytes]:
"""
Builds the Authorization headers for a federation request
Args:
destination: The destination homeserver of the request.
May be None if the destination is an identity server, in which case
destination_is must be non-None.
method: The HTTP method of the request
url_bytes: The URI path of the request
content: The body of the request
destination_is: As 'destination', but if the destination is an
identity server
Returns:
A list of headers to be added as "Authorization:" headers
"""
request = {
"method": method.decode("ascii"),
"uri": url_bytes.decode("ascii"),
"origin": self.server_name,
}
if destination is not None:
request["destination"] = destination.decode("ascii")
if destination_is is not None:
request["destination_is"] = destination_is.decode("ascii")
if content is not None:
request["content"] = content
request = sign_json(request, self.server_name, self.signing_key)
auth_headers = []
for key, sig in request["signatures"][self.server_name].items():
auth_headers.append(
(
'X-Matrix origin=%s,key="%s",sig="%s"'
% (self.server_name, key, sig)
).encode("ascii")
)
return auth_headers
async def put_json(
self,
destination: str,
path: str,
args: Optional[QueryArgs] = None,
data: Optional[JsonDict] = None,
json_data_callback: Optional[Callable[[], JsonDict]] = None,
long_retries: bool = False,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
backoff_on_404: bool = False,
try_trailing_slash_on_400: bool = False,
) -> Union[JsonDict, list]:
""" Sends the specified json data using PUT
Args:
destination: The remote server to send the HTTP request to.
path: The HTTP path.
args: query params
data: A dict containing the data that will be used as
the request body. This will be encoded as JSON.
json_data_callback: A callable returning the dict to
use as the request body.
long_retries: whether to use the long retry algorithm. See
docs on _send_request for details.
timeout: number of milliseconds to wait for the response.
self._default_timeout (60s) by default.
Note that we may make several attempts to send the request; this
timeout applies to the time spent waiting for response headers for
*each* attempt (including connection time) as well as the time spent
reading the response body after a 200 response.
ignore_backoff: true to ignore the historical backoff data
and try the request anyway.
backoff_on_404: True if we should count a 404 response as
a failure of the server (and should therefore back off future
requests).
try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
response we should try appending a trailing slash to the end
of the request. Workaround for #3622 in Synapse <= v0.99.3. This
will be attempted before backing off if backing off has been
enabled.
Returns:
Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="PUT",
destination=destination,
path=path,
query=args,
json_callback=json_data_callback,
json=data,
)
start_ms = self.clock.time_msec()
response = await self._send_request_with_optional_trailing_slash(
request,
try_trailing_slash_on_400,
backoff_on_404=backoff_on_404,
ignore_backoff=ignore_backoff,
long_retries=long_retries,
timeout=timeout,
)
if timeout is not None:
_sec_timeout = timeout / 1000
else:
_sec_timeout = self.default_timeout
body = await _handle_json_response(
self.reactor, _sec_timeout, request, response, start_ms
)
return body
async def post_json(
self,
destination: str,
path: str,
data: Optional[JsonDict] = None,
long_retries: bool = False,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
args: Optional[QueryArgs] = None,
) -> Union[JsonDict, list]:
""" Sends the specified json data using POST
Args:
destination: The remote server to send the HTTP request to.
path: The HTTP path.
data: A dict containing the data that will be used as
the request body. This will be encoded as JSON.
long_retries: whether to use the long retry algorithm. See
docs on _send_request for details.
timeout: number of milliseconds to wait for the response.
self._default_timeout (60s) by default.
Note that we may make several attempts to send the request; this
timeout applies to the time spent waiting for response headers for
*each* attempt (including connection time) as well as the time spent
reading the response body after a 200 response.
ignore_backoff: true to ignore the historical backoff data and
try the request anyway.
args: query params
Returns:
dict|list: Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="POST", destination=destination, path=path, query=args, json=data
)
start_ms = self.clock.time_msec()
response = await self._send_request(
request,
long_retries=long_retries,
timeout=timeout,
ignore_backoff=ignore_backoff,
)
if timeout:
_sec_timeout = timeout / 1000
else:
_sec_timeout = self.default_timeout
body = await _handle_json_response(
self.reactor, _sec_timeout, request, response, start_ms,
)
return body
async def get_json(
self,
destination: str,
path: str,
args: Optional[QueryArgs] = None,
retry_on_dns_fail: bool = True,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
try_trailing_slash_on_400: bool = False,
) -> Union[JsonDict, list]:
""" GETs some json from the given host homeserver and path
Args:
destination: The remote server to send the HTTP request to.
path: The HTTP path.
args: A dictionary used to create query strings, defaults to
None.
timeout: number of milliseconds to wait for the response.
self._default_timeout (60s) by default.
Note that we may make several attempts to send the request; this
timeout applies to the time spent waiting for response headers for
*each* attempt (including connection time) as well as the time spent
reading the response body after a 200 response.
ignore_backoff: true to ignore the historical backoff data
and try the request anyway.
try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
response we should try appending a trailing slash to the end of
the request. Workaround for #3622 in Synapse <= v0.99.3.
Returns:
Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="GET", destination=destination, path=path, query=args
)
start_ms = self.clock.time_msec()
response = await self._send_request_with_optional_trailing_slash(
request,
try_trailing_slash_on_400,
backoff_on_404=False,
ignore_backoff=ignore_backoff,
retry_on_dns_fail=retry_on_dns_fail,
timeout=timeout,
)
if timeout is not None:
_sec_timeout = timeout / 1000
else:
_sec_timeout = self.default_timeout
body = await _handle_json_response(
self.reactor, _sec_timeout, request, response, start_ms
)
return body
async def delete_json(
self,
destination: str,
path: str,
long_retries: bool = False,
timeout: Optional[int] = None,
ignore_backoff: bool = False,
args: Optional[QueryArgs] = None,
) -> Union[JsonDict, list]:
"""Send a DELETE request to the remote expecting some json response
Args:
destination: The remote server to send the HTTP request to.
path: The HTTP path.
long_retries: whether to use the long retry algorithm. See
docs on _send_request for details.
timeout: number of milliseconds to wait for the response.
self._default_timeout (60s) by default.
Note that we may make several attempts to send the request; this
timeout applies to the time spent waiting for response headers for
*each* attempt (including connection time) as well as the time spent
reading the response body after a 200 response.
ignore_backoff: true to ignore the historical backoff data and
try the request anyway.
args: query params
Returns:
Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="DELETE", destination=destination, path=path, query=args
)
start_ms = self.clock.time_msec()
response = await self._send_request(
request,
long_retries=long_retries,
timeout=timeout,
ignore_backoff=ignore_backoff,
)
if timeout is not None:
_sec_timeout = timeout / 1000
else:
_sec_timeout = self.default_timeout
body = await _handle_json_response(
self.reactor, _sec_timeout, request, response, start_ms
)
return body
async def get_file(
self,
destination: str,
path: str,
output_stream,
args: Optional[QueryArgs] = None,
retry_on_dns_fail: bool = True,
max_size: Optional[int] = None,
ignore_backoff: bool = False,
) -> Tuple[int, Dict[bytes, List[bytes]]]:
"""GETs a file from a given homeserver
Args:
destination: The remote server to send the HTTP request to.
path: The HTTP path to GET.
output_stream: File to write the response body to.
args: Optional dictionary used to create the query string.
ignore_backoff: true to ignore the historical backoff data
and try the request anyway.
Returns:
Resolves with an (int,dict) tuple of
the file length and a dict of the response headers.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
"""
request = MatrixFederationRequest(
method="GET", destination=destination, path=path, query=args
)
response = await self._send_request(
request, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff
)
headers = dict(response.headers.getAllRawHeaders())
try:
d = read_body_with_max_size(response, output_stream, max_size)
d.addTimeout(self.default_timeout, self.reactor)
length = await make_deferred_yieldable(d)
except BodyExceededMaxSize:
msg = "Requested file is too large > %r bytes" % (max_size,)
logger.warning(
"{%s} [%s] %s", request.txn_id, request.destination, msg,
)
SynapseError(502, msg, Codes.TOO_LARGE)
except Exception as e:
logger.warning(
"{%s} [%s] Error reading response: %s",
request.txn_id,
request.destination,
e,
)
raise
logger.info(
"{%s} [%s] Completed: %d %s [%d bytes] %s %s",
request.txn_id,
request.destination,
response.code,
response.phrase.decode("ascii", errors="replace"),
length,
request.method,
request.uri.decode("ascii"),
)
return (length, headers)
def _flatten_response_never_received(e):
if hasattr(e, "reasons"):
reasons = ", ".join(
_flatten_response_never_received(f.value) for f in e.reasons
)
return "%s:[%s]" % (type(e).__name__, reasons)
else:
return repr(e)
def check_content_type_is_json(headers: Headers) -> None:
    """
    Check that a set of HTTP headers have a Content-Type header, and that it
    is application/json.

    Args:
        headers: headers to check

    Raises:
        RequestSendFailed: if the Content-Type header is missing or isn't JSON
    """
    content_type_headers = headers.getRawHeaders(b"Content-Type")
    if content_type_headers is None:
        raise RequestSendFailed(
            RuntimeError("No Content-Type header received from remote server"),
            can_retry=False,
        )

    # Only the first Content-Type header is considered.
    raw_content_type = content_type_headers[0].decode("ascii")
    media_type, _options = cgi.parse_header(raw_content_type)
    if media_type != "application/json":
        raise RequestSendFailed(
            RuntimeError(
                "Remote server sent Content-Type header of '%s', not 'application/json'"
                % raw_content_type,
            ),
            can_retry=False,
        )
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_1916_3 |
crossvul-python_data_good_1890_2 | # All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception):
    """Base class for all exceptions raised by httplib2."""

    pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    """Error that carries the offending response and content.

    Callers can catch these and optionally turn them back into responses
    using the stored `response` and `content` attributes.
    """

    def __init__(self, desc, response, content):
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse):
    """A redirect response was received without a Location header."""

    pass
class RedirectLimit(HttpLib2ErrorWithResponse):
    """The maximum number of redirections was reached without ending up at a final URI."""

    pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse):
    """The response body could not be decompressed."""

    pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """The server requested a Digest-auth option that is not implemented."""

    pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse):
    """The server requested an HMAC-Digest-auth option that is not implemented."""

    pass
class MalformedHeader(HttpLib2Error):
    """An HTTP header could not be parsed."""

    pass
class RelativeURIError(HttpLib2Error):
    """A relative, rather than absolute, URI was supplied."""

    pass
class ServerNotFoundError(HttpLib2Error):
    """The server's hostname could not be resolved."""

    pass
class ProxiesUnavailableError(HttpLib2Error):
    """Proxy support was requested but is not available."""

    pass
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_1890_2 |
crossvul-python_data_good_4373_3 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains handlers for federation events."""
import itertools
import logging
from collections.abc import Container
from http import HTTPStatus
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import attr
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse import event_auth
from synapse.api.constants import (
EventTypes,
Membership,
RejectedReason,
RoomEncryptionAlgorithms,
)
from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
FederationDeniedError,
FederationError,
HttpResponseException,
NotFoundError,
RequestSendFailed,
SynapseError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
from synapse.crypto.event_signing import compute_event_signature
from synapse.event_auth import auth_types_for_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
from synapse.handlers._base import BaseHandler
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
make_deferred_yieldable,
nested_logging_context,
preserve_fn,
run_in_background,
)
from synapse.logging.utils import log_function
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.replication.http.federation import (
ReplicationCleanRoomRestServlet,
ReplicationFederationSendEventsRestServlet,
ReplicationStoreRoomOnInviteRestServlet,
)
from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
JsonDict,
MutableStateMap,
PersistedEventPosition,
RoomStreamToken,
StateMap,
UserID,
get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr
from synapse.visibility import filter_events_for_server
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@attr.s(slots=True)
class _NewEventInfo:
    """Holds information about a received event, ready for passing to _handle_new_events

    Attributes:
        event: the received event

        state: the state at that event

        auth_events: the auth_event map for that event
    """

    # The event itself (always present).
    event = attr.ib(type=EventBase)
    # State at the event; None if not yet resolved/fetched.
    state = attr.ib(type=Optional[Sequence[EventBase]], default=None)
    # Map from (type, state_key) to the auth events for this event.
    auth_events = attr.ib(type=Optional[MutableStateMap[EventBase]], default=None)
class FederationHandler(BaseHandler):
    """Handles events that originated from federation.

    Responsible for:
        a) handling received Pdus before handing them on as Events to the rest
        of the homeserver (including auth and state conflict resolutions)
        b) converting events that were produced by local clients that may need
        to be sent to remote homeservers.
        c) doing the necessary dances to invite remote users and join remote
        rooms.
    """

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self.hs = hs

        self.store = hs.get_datastore()
        self.storage = hs.get_storage()
        self.state_store = self.storage.state
        self.federation_client = hs.get_federation_client()
        self.state_handler = hs.get_state_handler()
        self._state_resolution_handler = hs.get_state_resolution_handler()
        self.server_name = hs.hostname
        self.keyring = hs.get_keyring()
        self.action_generator = hs.get_action_generator()
        self.is_mine_id = hs.is_mine_id
        self.spam_checker = hs.get_spam_checker()
        self.event_creation_handler = hs.get_event_creation_handler()
        self._message_handler = hs.get_message_handler()
        self._server_notices_mxid = hs.config.server_notices_mxid
        self.config = hs.config
        self.http_client = hs.get_simple_http_client()
        self._instance_name = hs.get_instance_name()
        self._replication = hs.get_replication_data_handler()

        # Replication clients for operations that must run on the main process.
        self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs)
        self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
            hs
        )

        # On workers, device resync and room-storage-on-invite are delegated
        # to the main process over replication; on the main process we call
        # the handlers/stores directly.
        if hs.config.worker_app:
            self._user_device_resync = ReplicationUserDevicesResyncRestServlet.make_client(
                hs
            )
            self._maybe_store_room_on_invite = ReplicationStoreRoomOnInviteRestServlet.make_client(
                hs
            )
        else:
            self._device_list_updater = hs.get_device_handler().device_list_updater
            self._maybe_store_room_on_invite = self.store.maybe_store_room_on_invite

        # When joining a room we need to queue any events for that room up.
        # For each room, a list of (pdu, origin) tuples.
        self.room_queues = {}  # type: Dict[str, List[Tuple[EventBase, str]]]
        self._room_pdu_linearizer = Linearizer("fed_room_pdu")

        self.third_party_event_rules = hs.get_third_party_event_rules()

        self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
    async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None:
        """ Process a PDU received via a federation /send/ transaction, or
        via backfill of missing prev_events

        Args:
            origin (str): server which initiated the /send/ transaction. Will
                be used to fetch missing events or state.
            pdu (FrozenEvent): received PDU
            sent_to_us_directly (bool): True if this event was pushed to us; False if
                we pulled it as the result of a missing prev_event.

        Raises:
            FederationError: if the event fails sanity checks, fails auth, or
                if we could not fetch/resolve the state needed to process it.
        """
        room_id = pdu.room_id
        event_id = pdu.event_id
        logger.info("handling received PDU: %s", pdu)
        # We reprocess pdus when we have seen them only as outliers
        existing = await self.store.get_event(
            event_id, allow_none=True, allow_rejected=True
        )
        # FIXME: Currently we fetch an event again when we already have it
        # if it has been marked as an outlier.
        already_seen = existing and (
            not existing.internal_metadata.is_outlier()
            or pdu.internal_metadata.is_outlier()
        )
        if already_seen:
            logger.debug("[%s %s]: Already seen pdu", room_id, event_id)
            return
        # do some initial sanity-checking of the event. In particular, make
        # sure it doesn't have hundreds of prev_events or auth_events, which
        # could cause a huge state resolution or cascade of event fetches.
        try:
            self._sanity_check_event(pdu)
        except SynapseError as err:
            logger.warning(
                "[%s %s] Received event failed sanity checks", room_id, event_id
            )
            raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)
        # If we are currently in the process of joining this room, then we
        # queue up events for later processing.
        if room_id in self.room_queues:
            logger.info(
                "[%s %s] Queuing PDU from %s for now: join in progress",
                room_id,
                event_id,
                origin,
            )
            self.room_queues[room_id].append((pdu, origin))
            return
        # If we're not in the room just ditch the event entirely. This is
        # probably an old server that has come back and thinks we're still in
        # the room (or we've been rejoined to the room by a state reset).
        #
        # Note that if we were never in the room then we would have already
        # dropped the event, since we wouldn't know the room version.
        is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
        if not is_in_room:
            logger.info(
                "[%s %s] Ignoring PDU from %s as we're not in the room",
                room_id,
                event_id,
                origin,
            )
            return None
        state = None
        # Get missing pdus if necessary.
        if not pdu.internal_metadata.is_outlier():
            # We only backfill backwards to the min depth.
            min_depth = await self.get_min_depth_for_context(pdu.room_id)
            logger.debug("[%s %s] min_depth: %d", room_id, event_id, min_depth)
            prevs = set(pdu.prev_event_ids())
            seen = await self.store.have_events_in_timeline(prevs)
            if min_depth is not None and pdu.depth < min_depth:
                # This is so that we don't notify the user about this
                # message, to work around the fact that some events will
                # reference really really old events we really don't want to
                # send to the clients.
                pdu.internal_metadata.outlier = True
            elif min_depth is not None and pdu.depth > min_depth:
                missing_prevs = prevs - seen
                if sent_to_us_directly and missing_prevs:
                    # If we're missing stuff, ensure we only fetch stuff one
                    # at a time.
                    logger.info(
                        "[%s %s] Acquiring room lock to fetch %d missing prev_events: %s",
                        room_id,
                        event_id,
                        len(missing_prevs),
                        shortstr(missing_prevs),
                    )
                    with (await self._room_pdu_linearizer.queue(pdu.room_id)):
                        logger.info(
                            "[%s %s] Acquired room lock to fetch %d missing prev_events",
                            room_id,
                            event_id,
                            len(missing_prevs),
                        )
                        try:
                            await self._get_missing_events_for_pdu(
                                origin, pdu, prevs, min_depth
                            )
                        except Exception as e:
                            raise Exception(
                                "Error fetching missing prev_events for %s: %s"
                                % (event_id, e)
                            ) from e
                        # Update the set of things we've seen after trying to
                        # fetch the missing stuff
                        seen = await self.store.have_events_in_timeline(prevs)
                        if not prevs - seen:
                            logger.info(
                                "[%s %s] Found all missing prev_events",
                                room_id,
                                event_id,
                            )
            if prevs - seen:
                # We've still not been able to get all of the prev_events for this event.
                #
                # In this case, we need to fall back to asking another server in the
                # federation for the state at this event. That's ok provided we then
                # resolve the state against other bits of the DAG before using it (which
                # will ensure that you can't just take over a room by sending an event,
                # withholding its prev_events, and declaring yourself to be an admin in
                # the subsequent state request).
                #
                # Now, if we're pulling this event as a missing prev_event, then clearly
                # this event is not going to become the only forward-extremity and we are
                # guaranteed to resolve its state against our existing forward
                # extremities, so that should be fine.
                #
                # On the other hand, if this event was pushed to us, it is possible for
                # it to become the only forward-extremity in the room, and we would then
                # trust its state to be the state for the whole room. This is very bad.
                # Further, if the event was pushed to us, there is no excuse for us not to
                # have all the prev_events. We therefore reject any such events.
                #
                # XXX this really feels like it could/should be merged with the above,
                # but there is an interaction with min_depth that I'm not really
                # following.
                if sent_to_us_directly:
                    logger.warning(
                        "[%s %s] Rejecting: failed to fetch %d prev events: %s",
                        room_id,
                        event_id,
                        len(prevs - seen),
                        shortstr(prevs - seen),
                    )
                    raise FederationError(
                        "ERROR",
                        403,
                        (
                            "Your server isn't divulging details about prev_events "
                            "referenced in this event."
                        ),
                        affected=pdu.event_id,
                    )
                logger.info(
                    "Event %s is missing prev_events: calculating state for a "
                    "backwards extremity",
                    event_id,
                )
                # Calculate the state after each of the previous events, and
                # resolve them to find the correct state at the current event.
                event_map = {event_id: pdu}
                try:
                    # Get the state of the events we know about
                    ours = await self.state_store.get_state_groups_ids(room_id, seen)
                    # state_maps is a list of mappings from (type, state_key) to event_id
                    state_maps = list(ours.values())  # type: List[StateMap[str]]
                    # we don't need this any more, let's delete it.
                    del ours
                    # Ask the remote server for the states we don't
                    # know about
                    for p in prevs - seen:
                        logger.info(
                            "Requesting state at missing prev_event %s", event_id,
                        )
                        with nested_logging_context(p):
                            # note that if any of the missing prevs share missing state or
                            # auth events, the requests to fetch those events are deduped
                            # by the get_pdu_cache in federation_client.
                            (remote_state, _,) = await self._get_state_for_room(
                                origin, room_id, p, include_event_in_state=True
                            )
                            remote_state_map = {
                                (x.type, x.state_key): x.event_id for x in remote_state
                            }
                            state_maps.append(remote_state_map)
                            for x in remote_state:
                                event_map[x.event_id] = x
                    room_version = await self.store.get_room_version_id(room_id)
                    state_map = await self._state_resolution_handler.resolve_events_with_store(
                        room_id,
                        room_version,
                        state_maps,
                        event_map,
                        state_res_store=StateResolutionStore(self.store),
                    )
                    # We need to give _process_received_pdu the actual state events
                    # rather than event ids, so generate that now.
                    # First though we need to fetch all the events that are in
                    # state_map, so we can build up the state below.
                    evs = await self.store.get_events(
                        list(state_map.values()),
                        get_prev_content=False,
                        redact_behaviour=EventRedactBehaviour.AS_IS,
                    )
                    event_map.update(evs)
                    state = [event_map[e] for e in state_map.values()]
                except Exception:
                    logger.warning(
                        "[%s %s] Error attempting to resolve state at missing "
                        "prev_events",
                        room_id,
                        event_id,
                        exc_info=True,
                    )
                    raise FederationError(
                        "ERROR",
                        403,
                        "We can't get valid state history.",
                        affected=event_id,
                    )
        await self._process_received_pdu(origin, pdu, state=state)
    async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth):
        """Attempt to fetch missing prev_events for the given pdu from `origin`,
        and run each fetched event through `on_receive_pdu`.

        Args:
            origin (str): Origin of the pdu. Will be called to get the missing events
            pdu: received pdu
            prevs (set(str)): Event ids of the prev_events for the pdu, some of
                which we may be missing
            min_depth (int): Minimum depth of events to return.
        """
        room_id = pdu.room_id
        event_id = pdu.event_id
        seen = await self.store.have_events_in_timeline(prevs)
        # Nothing to do if we already have all the prev_events.
        if not prevs - seen:
            return
        latest_list = await self.store.get_latest_event_ids_in_room(room_id)
        # We add the prev events that we have seen to the latest
        # list to ensure the remote server doesn't give them to us
        latest = set(latest_list)
        latest |= seen
        logger.info(
            "[%s %s]: Requesting missing events between %s and %s",
            room_id,
            event_id,
            shortstr(latest),
            event_id,
        )
        # XXX: we set timeout to 10s to help workaround
        # https://github.com/matrix-org/synapse/issues/1733.
        # The reason is to avoid holding the linearizer lock
        # whilst processing inbound /send transactions, causing
        # FDs to stack up and block other inbound transactions
        # which empirically can currently take up to 30 minutes.
        #
        # N.B. this explicitly disables retry attempts.
        #
        # N.B. this also increases our chances of falling back to
        # fetching fresh state for the room if the missing event
        # can't be found, which slightly reduces our security.
        # it may also increase our DAG extremity count for the room,
        # causing additional state resolution?  See #1760.
        # However, fetching state doesn't hold the linearizer lock
        # apparently.
        #
        # see https://github.com/matrix-org/synapse/pull/1744
        #
        # ----
        #
        # Update richvdh 2018/09/18: There are a number of problems with timing this
        # request out aggressively on the client side:
        #
        # - it plays badly with the server-side rate-limiter, which starts tarpitting you
        #   if you send too many requests at once, so you end up with the server carefully
        #   working through the backlog of your requests, which you have already timed
        #   out.
        #
        # - for this request in particular, we now (as of
        #   https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the
        #   server can't produce a plausible-looking set of prev_events - so we becone
        #   much more likely to reject the event.
        #
        # - contrary to what it says above, we do *not* fall back to fetching fresh state
        #   for the room if get_missing_events times out. Rather, we give up processing
        #   the PDU whose prevs we are missing, which then makes it much more likely that
        #   we'll end up back here for the *next* PDU in the list, which exacerbates the
        #   problem.
        #
        # - the aggressive 10s timeout was introduced to deal with incoming federation
        #   requests taking 8 hours to process. It's not entirely clear why that was going
        #   on; certainly there were other issues causing traffic storms which are now
        #   resolved, and I think in any case we may be more sensible about our locking
        #   now. We're *certainly* more sensible about our logging.
        #
        # All that said: Let's try increasing the timeout to 60s and see what happens.
        try:
            missing_events = await self.federation_client.get_missing_events(
                origin,
                room_id,
                earliest_events_ids=list(latest),
                latest_events=[pdu],
                limit=10,
                min_depth=min_depth,
                timeout=60000,
            )
        except (RequestSendFailed, HttpResponseException, NotRetryingDestination) as e:
            # We failed to get the missing events, but since we need to handle
            # the case of `get_missing_events` not returning the necessary
            # events anyway, it is safe to simply log the error and continue.
            logger.warning(
                "[%s %s]: Failed to get prev_events: %s", room_id, event_id, e
            )
            return
        logger.info(
            "[%s %s]: Got %d prev_events: %s",
            room_id,
            event_id,
            len(missing_events),
            shortstr(missing_events),
        )
        # We want to sort these by depth so we process them and
        # tell clients about them in order.
        missing_events.sort(key=lambda x: x.depth)
        for ev in missing_events:
            logger.info(
                "[%s %s] Handling received prev_event %s",
                room_id,
                event_id,
                ev.event_id,
            )
            with nested_logging_context(ev.event_id):
                try:
                    await self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
                except FederationError as e:
                    # A 403 means the fetched prev_event failed checks; we drop
                    # it and carry on with the remaining events.
                    if e.code == 403:
                        logger.warning(
                            "[%s %s] Received prev_event %s failed history check.",
                            room_id,
                            event_id,
                            ev.event_id,
                        )
                    else:
                        raise
    async def _get_state_for_room(
        self,
        destination: str,
        room_id: str,
        event_id: str,
        include_event_in_state: bool = False,
    ) -> Tuple[List[EventBase], List[EventBase]]:
        """Requests all of the room state at a given event from a remote homeserver.

        Args:
            destination: The remote homeserver to query for the state.
            room_id: The id of the room we're interested in.
            event_id: The id of the event we want the state at.
            include_event_in_state: if true, the event itself will be included in the
                returned state event list.

        Returns:
            A list of events in the state, possibly including the event itself, and
            a list of events in the auth chain for the given event.

        Raises:
            Exception: if `include_event_in_state` is set and the event itself
                could not be fetched.
        """
        (
            state_event_ids,
            auth_event_ids,
        ) = await self.federation_client.get_room_state_ids(
            destination, room_id, event_id=event_id
        )
        # Fetch the state events and their auth chain in a single batch.
        desired_events = set(state_event_ids + auth_event_ids)
        if include_event_in_state:
            desired_events.add(event_id)
        event_map = await self._get_events_from_store_or_dest(
            destination, room_id, desired_events
        )
        failed_to_fetch = desired_events - event_map.keys()
        if failed_to_fetch:
            logger.warning(
                "Failed to fetch missing state/auth events for %s %s",
                event_id,
                failed_to_fetch,
            )
        remote_state = [
            event_map[e_id] for e_id in state_event_ids if e_id in event_map
        ]
        if include_event_in_state:
            remote_event = event_map.get(event_id)
            if not remote_event:
                raise Exception("Unable to get missing prev_event %s" % (event_id,))
            # Only include the event if it is itself a non-rejected state event.
            if remote_event.is_state() and remote_event.rejected_reason is None:
                remote_state.append(remote_event)
        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
        auth_chain.sort(key=lambda e: e.depth)
        return remote_state, auth_chain
    async def _get_events_from_store_or_dest(
        self, destination: str, room_id: str, event_ids: Iterable[str]
    ) -> Dict[str, EventBase]:
        """Fetch events from a remote destination, checking if we already have them.

        Persists any events we don't already have as outliers.

        If we fail to fetch any of the events, a warning will be logged, and the event
        will be omitted from the result. Likewise, any events which turn out not to
        be in the given room.

        This function *does not* automatically get missing auth events of the
        newly fetched events. Callers must include the full auth chain of
        of the missing events in the `event_ids` argument, to ensure that any
        missing auth events are correctly fetched.

        Args:
            destination: server to fetch any missing events from
            room_id: room the events are expected to belong to
            event_ids: the IDs of the events wanted

        Returns:
            map from event_id to event
        """
        fetched_events = await self.store.get_events(event_ids, allow_rejected=True)
        missing_events = set(event_ids) - fetched_events.keys()
        if missing_events:
            logger.debug(
                "Fetching unknown state/auth events %s for room %s",
                missing_events,
                room_id,
            )
            await self._get_events_and_persist(
                destination=destination, room_id=room_id, events=missing_events
            )
            # we need to make sure we re-load from the database to get the rejected
            # state correct.
            fetched_events.update(
                (await self.store.get_events(missing_events, allow_rejected=True))
            )
        # check for events which were in the wrong room.
        #
        # this can happen if a remote server claims that the state or
        # auth_events at an event in room A are actually events in room B
        bad_events = [
            (event_id, event.room_id)
            for event_id, event in fetched_events.items()
            if event.room_id != room_id
        ]
        for bad_event_id, bad_room_id in bad_events:
            # This is a bogus situation, but since we may only discover it a long time
            # after it happened, we try our best to carry on, by just omitting the
            # bad events from the returned auth/state set.
            logger.warning(
                "Remote server %s claims event %s in room %s is an auth/state "
                "event in room %s",
                destination,
                bad_event_id,
                bad_room_id,
                room_id,
            )
            del fetched_events[bad_event_id]
        return fetched_events
    async def _process_received_pdu(
        self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]],
    ):
        """ Called when we have a new pdu. We need to do auth checks and put it
        through the StateHandler.

        Args:
            origin: server sending the event

            event: event to be persisted

            state: Normally None, but if we are handling a gap in the graph
                (ie, we are missing one or more prev_events), the resolved state at the
                event

        Raises:
            FederationError: if the event fails auth checks
        """
        room_id = event.room_id
        event_id = event.event_id
        logger.debug("[%s %s] Processing event: %s", room_id, event_id, event)
        try:
            await self._handle_new_event(origin, event, state=state)
        except AuthError as e:
            raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
        # For encrypted messages we check that we know about the sending device,
        # if we don't then we mark the device cache for that user as stale.
        if event.type == EventTypes.Encrypted:
            device_id = event.content.get("device_id")
            sender_key = event.content.get("sender_key")
            cached_devices = await self.store.get_cached_devices_for_user(event.sender)
            resync = False  # Whether we should resync device lists.
            device = None
            if device_id is not None:
                device = cached_devices.get(device_id)
                if device is None:
                    logger.info(
                        "Received event from remote device not in our cache: %s %s",
                        event.sender,
                        device_id,
                    )
                    resync = True
            # We also check if the `sender_key` matches what we expect.
            if sender_key is not None:
                # Figure out what sender key we're expecting. If we know the
                # device and recognize the algorithm then we can work out the
                # exact key to expect. Otherwise check it matches any key we
                # have for that device.
                current_keys = []  # type: Container[str]
                if device:
                    keys = device.get("keys", {}).get("keys", {})
                    if (
                        event.content.get("algorithm")
                        == RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2
                    ):
                        # For this algorithm we expect a curve25519 key.
                        key_name = "curve25519:%s" % (device_id,)
                        current_keys = [keys.get(key_name)]
                    else:
                        # We don't know understand the algorithm, so we just
                        # check it matches a key for the device.
                        current_keys = keys.values()
                elif device_id:
                    # We don't have any keys for the device ID.
                    pass
                else:
                    # The event didn't include a device ID, so we just look for
                    # keys across all devices.
                    current_keys = [
                        key
                        for device in cached_devices.values()
                        for key in device.get("keys", {}).get("keys", {}).values()
                    ]
                # We now check that the sender key matches (one of) the expected
                # keys.
                if sender_key not in current_keys:
                    logger.info(
                        "Received event from remote device with unexpected sender key: %s %s: %s",
                        event.sender,
                        device_id or "<no device_id>",
                        sender_key,
                    )
                    resync = True
            if resync:
                # Kick off the resync in the background so we don't block event
                # processing on it.
                run_as_background_process(
                    "resync_device_due_to_pdu", self._resync_device, event.sender
                )
async def _resync_device(self, sender: str) -> None:
"""We have detected that the device list for the given user may be out
of sync, so we try and resync them.
"""
try:
await self.store.mark_remote_user_device_cache_as_stale(sender)
# Immediately attempt a resync in the background
if self.config.worker_app:
await self._user_device_resync(user_id=sender)
else:
await self._device_list_updater.user_device_resync(sender)
except Exception:
logger.exception("Failed to resync device for %s", sender)
    @log_function
    async def backfill(self, dest, room_id, limit, extremities):
        """ Trigger a backfill request to `dest` for the given `room_id`

        This will attempt to get more events from the remote. If the other side
        has no new events to offer, this will return an empty list.

        As the events are received, we check their signatures, and also do some
        sanity-checking on them. If any of the backfilled events are invalid,
        this method throws a SynapseError.

        Args:
            dest (str): the remote homeserver to backfill from.
            room_id (str): the room to backfill.
            limit (int): maximum number of events to request from the remote.
            extremities: the backwards extremities to backfill from.

        Returns:
            list: the newly-fetched events which were processed (empty if the
                remote had nothing new to offer).

        TODO: make this more useful to distinguish failures of the remote
        server from invalid events (there is probably no point in trying to
        re-fetch invalid events from every other HS in the room.)
        """
        if dest == self.server_name:
            raise SynapseError(400, "Can't backfill from self.")
        events = await self.federation_client.backfill(
            dest, room_id, limit=limit, extremities=extremities
        )
        if not events:
            return []
        # ideally we'd sanity check the events here for excess prev_events etc,
        # but it's hard to reject events at this point without completely
        # breaking backfill in the same way that it is currently broken by
        # events whose signature we cannot verify (#3121).
        #
        # So for now we accept the events anyway. #3124 tracks this.
        #
        # for ev in events:
        #     self._sanity_check_event(ev)
        # Don't bother processing events we already have.
        seen_events = await self.store.have_events_in_timeline(
            {e.event_id for e in events}
        )
        events = [e for e in events if e.event_id not in seen_events]
        if not events:
            return []
        event_map = {e.event_id: e for e in events}
        event_ids = {e.event_id for e in events}
        # build a list of events whose prev_events weren't in the batch.
        # (XXX: this will include events whose prev_events we already have; that doesn't
        # sound right?)
        edges = [ev.event_id for ev in events if set(ev.prev_event_ids()) - event_ids]
        logger.info("backfill: Got %d events with %d edges", len(events), len(edges))
        # For each edge get the current state.
        auth_events = {}
        state_events = {}
        events_to_state = {}
        for e_id in edges:
            state, auth = await self._get_state_for_room(
                destination=dest,
                room_id=room_id,
                event_id=e_id,
                include_event_in_state=False,
            )
            auth_events.update({a.event_id: a for a in auth})
            auth_events.update({s.event_id: s for s in state})
            state_events.update({s.event_id: s for s in state})
            events_to_state[e_id] = state
        # Gather every auth event id referenced by the batch plus the fetched
        # state/auth events, then pull any of them that were in the batch itself.
        required_auth = {
            a_id
            for event in events
            + list(state_events.values())
            + list(auth_events.values())
            for a_id in event.auth_event_ids()
        }
        auth_events.update(
            {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map}
        )
        ev_infos = []
        # Step 1: persist the events in the chunk we fetched state for (i.e.
        # the backwards extremities), with custom auth events and state
        for e_id in events_to_state:
            # For paranoia we ensure that these events are marked as
            # non-outliers
            ev = event_map[e_id]
            assert not ev.internal_metadata.is_outlier()
            ev_infos.append(
                _NewEventInfo(
                    event=ev,
                    state=events_to_state[e_id],
                    auth_events={
                        (
                            auth_events[a_id].type,
                            auth_events[a_id].state_key,
                        ): auth_events[a_id]
                        for a_id in ev.auth_event_ids()
                        if a_id in auth_events
                    },
                )
            )
        if ev_infos:
            await self._handle_new_events(dest, room_id, ev_infos, backfilled=True)
        # Step 2: Persist the rest of the events in the chunk one by one
        events.sort(key=lambda e: e.depth)
        for event in events:
            if event in events_to_state:
                continue
            # For paranoia we ensure that these events are marked as
            # non-outliers
            assert not event.internal_metadata.is_outlier()
            # We store these one at a time since each event depends on the
            # previous to work out the state.
            # TODO: We can probably do something more clever here.
            await self._handle_new_event(dest, event, backfilled=True)
        return events
    async def maybe_backfill(
        self, room_id: str, current_depth: int, limit: int
    ) -> bool:
        """Checks the database to see if we should backfill before paginating,
        and if so do.

        Args:
            room_id
            current_depth: The depth from which we're paginating from. This is
                used to decide if we should backfill and what extremities to
                use.
            limit: The number of events that the pagination request will
                return. This is used as part of the heuristic to decide if we
                should back paginate.

        Returns:
            True if a backfill was successfully performed, False otherwise.
        """
        extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
        if not extremities:
            logger.debug("Not backfilling as no extremeties found.")
            return False
        # We only want to paginate if we can actually see the events we'll get,
        # as otherwise we'll just spend a lot of resources to get redacted
        # events.
        #
        # We do this by filtering all the backwards extremities and seeing if
        # any remain. Given we don't have the extremity events themselves, we
        # need to actually check the events that reference them.
        #
        # *Note*: the spec wants us to keep backfilling until we reach the start
        # of the room in case we are allowed to see some of the history. However
        # in practice that causes more issues than its worth, as a) its
        # relatively rare for there to be any visible history and b) even when
        # there is its often sufficiently long ago that clients would stop
        # attempting to paginate before backfill reached the visible history.
        #
        # TODO: If we do do a backfill then we should filter the backwards
        #   extremities to only include those that point to visible portions of
        #   history.
        #
        # TODO: Correctly handle the case where we are allowed to see the
        #   forward event but not the backward extremity, e.g. in the case of
        #   initial join of the server where we are allowed to see the join
        #   event but not anything before it. This would require looking at the
        #   state *before* the event, ignoring the special casing certain event
        #   types have.
        forward_events = await self.store.get_successor_events(list(extremities))
        extremities_events = await self.store.get_events(
            forward_events,
            redact_behaviour=EventRedactBehaviour.AS_IS,
            get_prev_content=False,
        )
        # We set `check_history_visibility_only` as we might otherwise get false
        # positives from users having been erased.
        filtered_extremities = await filter_events_for_server(
            self.storage,
            self.server_name,
            list(extremities_events.values()),
            redact=False,
            check_history_visibility_only=True,
        )
        if not filtered_extremities:
            return False
        # Check if we reached a point where we should start backfilling.
        sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1]))
        max_depth = sorted_extremeties_tuple[0][1]
        # If we're approaching an extremity we trigger a backfill, otherwise we
        # no-op.
        #
        # We chose twice the limit here as then clients paginating backwards
        # will send pagination requests that trigger backfill at least twice
        # using the most recent extremity before it gets removed (see below). We
        # chose more than one times the limit in case of failure, but choosing a
        # much larger factor will result in triggering a backfill request much
        # earlier than necessary.
        if current_depth - 2 * limit > max_depth:
            logger.debug(
                "Not backfilling as we don't need to. %d < %d - 2 * %d",
                max_depth,
                current_depth,
                limit,
            )
            return False
        logger.debug(
            "room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s",
            room_id,
            current_depth,
            max_depth,
            sorted_extremeties_tuple,
        )
        # We ignore extremities that have a greater depth than our current depth
        # as:
        #    1. we don't really care about getting events that have happened
        #       before our current position; and
        #    2. we have likely previously tried and failed to backfill from that
        #       extremity, so to avoid getting "stuck" requesting the same
        #       backfill repeatedly we drop those extremities.
        filtered_sorted_extremeties_tuple = [
            t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
        ]
        # However, we need to check that the filtered extremities are non-empty.
        # If they are empty then either we can a) bail or b) still attempt to
        # backill. We opt to try backfilling anyway just in case we do get
        # relevant events.
        if filtered_sorted_extremeties_tuple:
            sorted_extremeties_tuple = filtered_sorted_extremeties_tuple
        # We don't want to specify too many extremities as it causes the backfill
        # request URI to be too long.
        extremities = dict(sorted_extremeties_tuple[:5])
        # Now we need to decide which hosts to hit first.
        # First we try hosts that are already in the room
        # TODO: HEURISTIC ALERT.
        curr_state = await self.state_handler.get_current_state(room_id)

        def get_domains_from_state(state):
            """Get joined domains from state

            Args:
                state (dict[tuple, FrozenEvent]): State map from type/state
                    key to event.

            Returns:
                list[tuple[str, int]]: Returns a list of servers with the
                    lowest depth of their joins. Sorted by lowest depth first.
            """
            joined_users = [
                (state_key, int(event.depth))
                for (e_type, state_key), event in state.items()
                if e_type == EventTypes.Member and event.membership == Membership.JOIN
            ]
            joined_domains = {}  # type: Dict[str, int]
            for u, d in joined_users:
                try:
                    dom = get_domain_from_id(u)
                    old_d = joined_domains.get(dom)
                    if old_d:
                        joined_domains[dom] = min(d, old_d)
                    else:
                        joined_domains[dom] = d
                except Exception:
                    pass
            return sorted(joined_domains.items(), key=lambda d: d[1])

        curr_domains = get_domains_from_state(curr_state)
        likely_domains = [
            domain for domain, depth in curr_domains if domain != self.server_name
        ]

        async def try_backfill(domains):
            # Try each domain in turn until one succeeds; returns True on success.
            # TODO: Should we try multiple of these at a time?
            for dom in domains:
                try:
                    await self.backfill(
                        dom, room_id, limit=100, extremities=extremities
                    )
                    # If this succeeded then we probably already have the
                    # appropriate stuff.
                    # TODO: We can probably do something more intelligent here.
                    return True
                except SynapseError as e:
                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except HttpResponseException as e:
                    if 400 <= e.code < 500:
                        raise e.to_synapse_error()
                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except CodeMessageException as e:
                    if 400 <= e.code < 500:
                        raise
                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except NotRetryingDestination as e:
                    logger.info(str(e))
                    continue
                except RequestSendFailed as e:
                    logger.info("Failed to get backfill from %s because %s", dom, e)
                    continue
                except FederationDeniedError as e:
                    logger.info(e)
                    continue
                except Exception as e:
                    logger.exception("Failed to backfill from %s because %s", dom, e)
                    continue
            return False

        success = await try_backfill(likely_domains)
        if success:
            return True
        # Huh, well *those* domains didn't work out. Lets try some domains
        # from the time.
        tried_domains = set(likely_domains)
        tried_domains.add(self.server_name)
        event_ids = list(extremities.keys())
        logger.debug("calling resolve_state_groups in _maybe_backfill")
        resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
        states = await make_deferred_yieldable(
            defer.gatherResults(
                [resolve(room_id, [e]) for e in event_ids], consumeErrors=True
            )
        )
        # dict[str, dict[tuple, str]], a map from event_id to state map of
        # event_ids.
        states = dict(zip(event_ids, [s.state for s in states]))
        state_map = await self.store.get_events(
            [e_id for ids in states.values() for e_id in ids.values()],
            get_prev_content=False,
        )
        states = {
            key: {
                k: state_map[e_id]
                for k, e_id in state_dict.items()
                if e_id in state_map
            }
            for key, state_dict in states.items()
        }
        for e_id, _ in sorted_extremeties_tuple:
            likely_domains = get_domains_from_state(states[e_id])
            success = await try_backfill(
                [dom for dom, _ in likely_domains if dom not in tried_domains]
            )
            if success:
                return True
            tried_domains.update(dom for dom, _ in likely_domains)
        return False
    async def _get_events_and_persist(
        self, destination: str, room_id: str, events: Iterable[str]
    ):
        """Fetch the given events from a server, and persist them as outliers.

        This function *does not* recursively get missing auth events of the
        newly fetched events. Callers must include in the `events` argument
        any missing events from the auth chain.

        Logs a warning if we can't find the given event.

        Args:
            destination: server to fetch the events from
            room_id: room the events are expected to belong to
            events: the IDs of the events to fetch and persist
        """
        room_version = await self.store.get_room_version(room_id)
        event_map = {}  # type: Dict[str, EventBase]

        async def get_event(event_id: str):
            # Fetch a single event from `destination`, recording it in
            # `event_map` on success; fetch failures are logged and skipped.
            with nested_logging_context(event_id):
                try:
                    event = await self.federation_client.get_pdu(
                        [destination], event_id, room_version, outlier=True,
                    )
                    if event is None:
                        logger.warning(
                            "Server %s didn't return event %s", destination, event_id,
                        )
                        return
                    event_map[event.event_id] = event
                except Exception as e:
                    logger.warning(
                        "Error fetching missing state/auth event %s: %s %s",
                        event_id,
                        type(e),
                        e,
                    )

        await concurrently_execute(get_event, events, 5)
        # Make a map of auth events for each event. We do this after fetching
        # all the events as some of the events' auth events will be in the list
        # of requested events.
        auth_events = [
            aid
            for event in event_map.values()
            for aid in event.auth_event_ids()
            if aid not in event_map
        ]
        persisted_events = await self.store.get_events(
            auth_events, allow_rejected=True,
        )
        event_infos = []
        for event in event_map.values():
            auth = {}
            for auth_event_id in event.auth_event_ids():
                # Prefer the persisted copy (it has the rejection state), falling
                # back to the freshly-fetched copy.
                ae = persisted_events.get(auth_event_id) or event_map.get(auth_event_id)
                if ae:
                    auth[(ae.type, ae.state_key)] = ae
                else:
                    logger.info("Missing auth event %s", auth_event_id)
            event_infos.append(_NewEventInfo(event, None, auth))
        await self._handle_new_events(
            destination, room_id, event_infos,
        )
def _sanity_check_event(self, ev):
"""
Do some early sanity checks of a received event
In particular, checks it doesn't have an excessive number of
prev_events or auth_events, which could cause a huge state resolution
or cascade of event fetches.
Args:
ev (synapse.events.EventBase): event to be checked
Returns: None
Raises:
SynapseError if the event does not pass muster
"""
if len(ev.prev_event_ids()) > 20:
logger.warning(
"Rejecting event %s which has %i prev_events",
ev.event_id,
len(ev.prev_event_ids()),
)
raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many prev_events")
if len(ev.auth_event_ids()) > 10:
logger.warning(
"Rejecting event %s which has %i auth_events",
ev.event_id,
len(ev.auth_event_ids()),
)
raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")
async def send_invite(self, target_host, event):
""" Sends the invite to the remote server for signing.
Invites must be signed by the invitee's server before distribution.
"""
pdu = await self.federation_client.send_invite(
destination=target_host,
room_id=event.room_id,
event_id=event.event_id,
pdu=event,
)
return pdu
async def on_event_auth(self, event_id: str) -> List[EventBase]:
event = await self.store.get_event(event_id)
auth = await self.store.get_auth_chain(
list(event.auth_event_ids()), include_given=True
)
return list(auth)
    async def do_invite_join(
        self, target_hosts: Iterable[str], room_id: str, joinee: str, content: JsonDict
    ) -> Tuple[str, int]:
        """ Attempts to join the `joinee` to the room `room_id` via the
        servers contained in `target_hosts`.
        This first triggers a /make_join/ request that returns a partial
        event that we can fill out and sign. This is then sent to the
        remote server via /send_join/ which responds with the state at that
        event and the auth_chains.
        We suspend processing of any received events from this room until we
        have finished processing the join.
        Args:
            target_hosts: List of servers to attempt to join the room with.
            room_id: The ID of the room to join.
            joinee: The User ID of the joining user.
            content: The event content to use for the join event.
        Returns:
            The event id of the join event, and the stream id at which it
            was persisted.
        """
        # TODO: We should be able to call this on workers, but the upgrading of
        # room stuff after join currently doesn't work on workers.
        assert self.config.worker.worker_app is None
        logger.debug("Joining %s to %s", joinee, room_id)
        origin, event, room_version_obj = await self._make_and_verify_event(
            target_hosts,
            room_id,
            joinee,
            "join",
            content,
            # advertise every room version we know about, so the remote can
            # pick a compatible one for the partial join event.
            params={"ver": KNOWN_ROOM_VERSIONS},
        )
        # This shouldn't happen, because the RoomMemberHandler has a
        # linearizer lock which only allows one operation per user per room
        # at a time - so this is just paranoia.
        assert room_id not in self.room_queues
        # Any PDUs received for this room while we are joining get queued
        # here and processed after the join completes (see `finally` below).
        self.room_queues[room_id] = []
        await self._clean_room_for_join(room_id)
        handled_events = set()
        try:
            # Try the host we successfully got a response to /make_join/
            # request first.
            host_list = list(target_hosts)
            try:
                host_list.remove(origin)
                host_list.insert(0, origin)
            except ValueError:
                pass
            ret = await self.federation_client.send_join(
                host_list, event, room_version_obj
            )
            origin = ret["origin"]
            state = ret["state"]
            auth_chain = ret["auth_chain"]
            auth_chain.sort(key=lambda e: e.depth)
            # Record every event id we have now seen as part of the join.
            handled_events.update([s.event_id for s in state])
            handled_events.update([a.event_id for a in auth_chain])
            handled_events.add(event.event_id)
            logger.debug("do_invite_join auth_chain: %s", auth_chain)
            logger.debug("do_invite_join state: %s", state)
            logger.debug("do_invite_join event: %s", event)
            # if this is the first time we've joined this room, it's time to add
            # a row to `rooms` with the correct room version. If there's already a
            # row there, we should override it, since it may have been populated
            # based on an invite request which lied about the room version.
            #
            # federation_client.send_join has already checked that the room
            # version in the received create event is the same as room_version_obj,
            # so we can rely on it now.
            #
            await self.store.upsert_room_on_join(
                room_id=room_id, room_version=room_version_obj,
            )
            max_stream_id = await self._persist_auth_tree(
                origin, room_id, auth_chain, state, event, room_version_obj
            )
            # We wait here until this instance has seen the events come down
            # replication (if we're using replication) as the below uses caches.
            await self._replication.wait_for_stream_position(
                self.config.worker.events_shard_config.get_instance(room_id),
                "events",
                max_stream_id,
            )
            # Check whether this room is the result of an upgrade of a room we already know
            # about. If so, migrate over user information
            predecessor = await self.store.get_room_predecessor(room_id)
            if not predecessor or not isinstance(predecessor.get("room_id"), str):
                return event.event_id, max_stream_id
            old_room_id = predecessor["room_id"]
            logger.debug(
                "Found predecessor for %s during remote join: %s", room_id, old_room_id
            )
            # We retrieve the room member handler here as to not cause a cyclic dependency
            member_handler = self.hs.get_room_member_handler()
            await member_handler.transfer_room_state_on_room_upgrade(
                old_room_id, room_id
            )
            logger.debug("Finished joining %s to %s", joinee, room_id)
            return event.event_id, max_stream_id
        finally:
            room_queue = self.room_queues[room_id]
            del self.room_queues[room_id]
            # we don't need to wait for the queued events to be processed -
            # it's just a best-effort thing at this point. We do want to do
            # them roughly in order, though, otherwise we'll end up making
            # lots of requests for missing prev_events which we do actually
            # have. Hence we fire off the background task, but don't wait for it.
            run_in_background(self._handle_queued_pdus, room_queue)
async def _handle_queued_pdus(self, room_queue):
"""Process PDUs which got queued up while we were busy send_joining.
Args:
room_queue (list[FrozenEvent, str]): list of PDUs to be processed
and the servers that sent them
"""
for p, origin in room_queue:
try:
logger.info(
"Processing queued PDU %s which was received "
"while we were joining %s",
p.event_id,
p.room_id,
)
with nested_logging_context(p.event_id):
await self.on_receive_pdu(origin, p, sent_to_us_directly=True)
except Exception as e:
logger.warning(
"Error handling queued PDU %s from %s: %s", p.event_id, origin, e
)
    async def on_make_join_request(
        self, origin: str, room_id: str, user_id: str
    ) -> EventBase:
        """ We've received a /make_join/ request, so we create a partial
        join event for the room and return that. We do *not* persist or
        process it until the other server has signed it and sent it back.
        Args:
            origin: The (verified) server name of the requesting server.
            room_id: Room to create join event in
            user_id: The user to create the join for
        Returns:
            The partial, unsigned join event.
        Raises:
            SynapseError if `user_id` does not belong to `origin`.
            NotFoundError if we are no longer in the room.
        """
        # Only the server that owns the user may ask us to build its join.
        if get_domain_from_id(user_id) != origin:
            logger.info(
                "Got /make_join request for user %r from different origin %s, ignoring",
                user_id,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
        # checking the room version will check that we've actually heard of the room
        # (and return a 404 otherwise)
        room_version = await self.store.get_room_version_id(room_id)
        # now check that we are *still* in the room
        is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
        if not is_in_room:
            logger.info(
                "Got /make_join request for room %s we are no longer in", room_id,
            )
            raise NotFoundError("Not an active room on this server")
        event_content = {"membership": Membership.JOIN}
        builder = self.event_builder_factory.new(
            room_version,
            {
                "type": EventTypes.Member,
                "content": event_content,
                "room_id": room_id,
                "sender": user_id,
                "state_key": user_id,
            },
        )
        try:
            event, context = await self.event_creation_handler.create_new_client_event(
                builder=builder
            )
        except SynapseError as e:
            logger.warning("Failed to create join to %s because %s", room_id, e)
            raise
        # The remote hasn't signed it yet, obviously. We'll do the full checks
        # when we get the event back in `on_send_join_request`
        await self.auth.check_from_context(
            room_version, event, context, do_sig_check=False
        )
        return event
    async def on_send_join_request(self, origin, pdu):
        """ We have received a join event for a room. Fully process it and
        respond with the current state and auth chains.

        Args:
            origin (str): the (verified) server name of the sender.
            pdu (EventBase): the signed join event.
        Returns:
            dict with keys `state` and `auth_chain`, describing the room at
            the point of the join.
        """
        event = pdu
        logger.debug(
            "on_send_join_request from %s: Got event: %s, signatures: %s",
            origin,
            event.event_id,
            event.signatures,
        )
        # Only accept the join if it was sent by the server that owns the
        # joining user.
        if get_domain_from_id(event.sender) != origin:
            logger.info(
                "Got /send_join request for user %r from different origin %s",
                event.sender,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
        event.internal_metadata.outlier = False
        # Send this event on behalf of the origin server.
        #
        # The reasons we have the destination server rather than the origin
        # server send it are slightly mysterious: the origin server should have
        # all the necessary state once it gets the response to the send_join,
        # so it could send the event itself if it wanted to. It may be that
        # doing it this way reduces failure modes, or avoids certain attacks
        # where a new server selectively tells a subset of the federation that
        # it has joined.
        #
        # The fact is that, as of the current writing, Synapse doesn't send out
        # the join event over federation after joining, and changing it now
        # would introduce the danger of backwards-compatibility problems.
        event.internal_metadata.send_on_behalf_of = origin
        context = await self._handle_new_event(origin, event)
        logger.debug(
            "on_send_join_request: After _handle_new_event: %s, sigs: %s",
            event.event_id,
            event.signatures,
        )
        # Reply with the room state *before* the join, plus its auth chain.
        prev_state_ids = await context.get_prev_state_ids()
        state_ids = list(prev_state_ids.values())
        auth_chain = await self.store.get_auth_chain(state_ids)
        state = await self.store.get_events(list(prev_state_ids.values()))
        return {"state": list(state.values()), "auth_chain": auth_chain}
    async def on_invite_request(
        self, origin: str, event: EventBase, room_version: RoomVersion
    ):
        """ We've got an invite event. Process and persist it. Sign it.
        Respond with the now signed event.

        Args:
            origin: the (verified) server name of the sender.
            event: the invite event.
            room_version: the room version claimed by the sender.
        Returns:
            The invite event, now signed by this server.
        Raises:
            SynapseError if any of the validity checks fail.
        """
        if event.state_key is None:
            raise SynapseError(400, "The invite event did not have a state key")
        is_blocked = await self.store.is_room_blocked(event.room_id)
        if is_blocked:
            raise SynapseError(403, "This room has been blocked on this server")
        if self.hs.config.block_non_admin_invites:
            raise SynapseError(403, "This server does not accept room invites")
        # Consult the spam checker before accepting the invite.
        if not self.spam_checker.user_may_invite(
            event.sender, event.state_key, event.room_id
        ):
            raise SynapseError(
                403, "This user is not permitted to send invites to this server/user"
            )
        membership = event.content.get("membership")
        if event.type != EventTypes.Member or membership != Membership.INVITE:
            raise SynapseError(400, "The event was not an m.room.member invite event")
        # The invite must come from the server that owns the inviting user.
        sender_domain = get_domain_from_id(event.sender)
        if sender_domain != origin:
            raise SynapseError(
                400, "The invite event was not from the server sending it"
            )
        if not self.is_mine_id(event.state_key):
            raise SynapseError(400, "The invite event must be for this server")
        # block any attempts to invite the server notices mxid
        if event.state_key == self._server_notices_mxid:
            raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
        # keep a record of the room version, if we don't yet know it.
        # (this may get overwritten if we later get a different room version in a
        # join dance).
        await self._maybe_store_room_on_invite(
            room_id=event.room_id, room_version=room_version
        )
        # We have no state for the room, so the invite is stored as an
        # out-of-band outlier membership event.
        event.internal_metadata.outlier = True
        event.internal_metadata.out_of_band_membership = True
        event.signatures.update(
            compute_event_signature(
                room_version,
                event.get_pdu_json(),
                self.hs.hostname,
                self.hs.signing_key,
            )
        )
        context = await self.state_handler.compute_event_context(event)
        await self.persist_events_and_notify(event.room_id, [(event, context)])
        return event
async def do_remotely_reject_invite(
self, target_hosts: Iterable[str], room_id: str, user_id: str, content: JsonDict
) -> Tuple[EventBase, int]:
origin, event, room_version = await self._make_and_verify_event(
target_hosts, room_id, user_id, "leave", content=content
)
# Mark as outlier as we don't have any state for this event; we're not
# even in the room.
event.internal_metadata.outlier = True
event.internal_metadata.out_of_band_membership = True
# Try the host that we successfully called /make_leave/ on first for
# the /send_leave/ request.
host_list = list(target_hosts)
try:
host_list.remove(origin)
host_list.insert(0, origin)
except ValueError:
pass
await self.federation_client.send_leave(host_list, event)
context = await self.state_handler.compute_event_context(event)
stream_id = await self.persist_events_and_notify(
event.room_id, [(event, context)]
)
return event, stream_id
async def _make_and_verify_event(
self,
target_hosts: Iterable[str],
room_id: str,
user_id: str,
membership: str,
content: JsonDict = {},
params: Optional[Dict[str, Union[str, Iterable[str]]]] = None,
) -> Tuple[str, EventBase, RoomVersion]:
(
origin,
event,
room_version,
) = await self.federation_client.make_membership_event(
target_hosts, room_id, user_id, membership, content, params=params
)
logger.debug("Got response to make_%s: %s", membership, event)
# We should assert some things.
# FIXME: Do this in a nicer way
assert event.type == EventTypes.Member
assert event.user_id == user_id
assert event.state_key == user_id
assert event.room_id == room_id
return origin, event, room_version
    async def on_make_leave_request(
        self, origin: str, room_id: str, user_id: str
    ) -> EventBase:
        """ We've received a /make_leave/ request, so we create a partial
        leave event for the room and return that. We do *not* persist or
        process it until the other server has signed it and sent it back.
        Args:
            origin: The (verified) server name of the requesting server.
            room_id: Room to create leave event in
            user_id: The user to create the leave for
        Returns:
            The partial, unsigned leave event.
        Raises:
            SynapseError if `user_id` does not belong to `origin`.
        """
        # Only the server that owns the user may ask us to build its leave.
        if get_domain_from_id(user_id) != origin:
            logger.info(
                "Got /make_leave request for user %r from different origin %s, ignoring",
                user_id,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
        room_version = await self.store.get_room_version_id(room_id)
        builder = self.event_builder_factory.new(
            room_version,
            {
                "type": EventTypes.Member,
                "content": {"membership": Membership.LEAVE},
                "room_id": room_id,
                "sender": user_id,
                "state_key": user_id,
            },
        )
        event, context = await self.event_creation_handler.create_new_client_event(
            builder=builder
        )
        try:
            # The remote hasn't signed it yet, obviously. We'll do the full checks
            # when we get the event back in `on_send_leave_request`
            await self.auth.check_from_context(
                room_version, event, context, do_sig_check=False
            )
        except AuthError as e:
            logger.warning("Failed to create new leave %r because %s", event, e)
            raise e
        return event
async def on_send_leave_request(self, origin, pdu):
""" We have received a leave event for a room. Fully process it."""
event = pdu
logger.debug(
"on_send_leave_request: Got event: %s, signatures: %s",
event.event_id,
event.signatures,
)
if get_domain_from_id(event.sender) != origin:
logger.info(
"Got /send_leave request for user %r from different origin %s",
event.sender,
origin,
)
raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
event.internal_metadata.outlier = False
await self._handle_new_event(origin, event)
logger.debug(
"on_send_leave_request: After _handle_new_event: %s, sigs: %s",
event.event_id,
event.signatures,
)
return None
async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]:
"""Returns the state at the event. i.e. not including said event.
"""
event = await self.store.get_event(event_id, check_room_id=room_id)
state_groups = await self.state_store.get_state_groups(room_id, [event_id])
if state_groups:
_, state = list(state_groups.items()).pop()
results = {(e.type, e.state_key): e for e in state}
if event.is_state():
# Get previous state
if "replaces_state" in event.unsigned:
prev_id = event.unsigned["replaces_state"]
if prev_id != event.event_id:
prev_event = await self.store.get_event(prev_id)
results[(event.type, event.state_key)] = prev_event
else:
del results[(event.type, event.state_key)]
res = list(results.values())
return res
else:
return []
async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]:
"""Returns the state at the event. i.e. not including said event.
"""
event = await self.store.get_event(event_id, check_room_id=room_id)
state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id])
if state_groups:
_, state = list(state_groups.items()).pop()
results = state
if event.is_state():
# Get previous state
if "replaces_state" in event.unsigned:
prev_id = event.unsigned["replaces_state"]
if prev_id != event.event_id:
results[(event.type, event.state_key)] = prev_id
else:
results.pop((event.type, event.state_key), None)
return list(results.values())
else:
return []
@log_function
async def on_backfill_request(
self, origin: str, room_id: str, pdu_list: List[str], limit: int
) -> List[EventBase]:
in_room = await self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
# Synapse asks for 100 events per backfill request. Do not allow more.
limit = min(limit, 100)
events = await self.store.get_backfill_events(room_id, pdu_list, limit)
events = await filter_events_for_server(self.storage, origin, events)
return events
@log_function
async def get_persisted_pdu(
self, origin: str, event_id: str
) -> Optional[EventBase]:
"""Get an event from the database for the given server.
Args:
origin: hostname of server which is requesting the event; we
will check that the server is allowed to see it.
event_id: id of the event being requested
Returns:
None if we know nothing about the event; otherwise the (possibly-redacted) event.
Raises:
AuthError if the server is not currently in the room
"""
event = await self.store.get_event(
event_id, allow_none=True, allow_rejected=True
)
if event:
in_room = await self.auth.check_host_in_room(event.room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
events = await filter_events_for_server(self.storage, origin, [event])
event = events[0]
return event
else:
return None
async def get_min_depth_for_context(self, context):
return await self.store.get_min_depth(context)
    async def _handle_new_event(
        self, origin, event, state=None, auth_events=None, backfilled=False
    ):
        """Run auth checks on a single event (via `_prep_event`), persist it
        and notify.

        Args:
            origin (str): server that sent us the event.
            event (EventBase): the event itself.
            state: state at the event, if we don't have all its prev_events
                (passed through to `_prep_event`).
            auth_events: claimed auth events, if already known (passed
                through to `_prep_event`).
            backfilled (bool): whether the event came from backfill.

        Returns:
            The EventContext computed for the event.
        """
        context = await self._prep_event(
            origin, event, state=state, auth_events=auth_events, backfilled=backfilled
        )
        try:
            # Push actions are only generated for live events: not outliers,
            # not backfilled events, and not rejected events.
            if (
                not event.internal_metadata.is_outlier()
                and not backfilled
                and not context.rejected
            ):
                await self.action_generator.handle_push_actions_for_event(
                    event, context
                )
            await self.persist_events_and_notify(
                event.room_id, [(event, context)], backfilled=backfilled
            )
        except Exception:
            # Persisting failed: clear any staged push actions in the
            # background, then propagate the error.
            run_in_background(
                self.store.remove_push_actions_from_staging, event.event_id
            )
            raise
        return context
    async def _handle_new_events(
        self,
        origin: str,
        room_id: str,
        event_infos: Iterable[_NewEventInfo],
        backfilled: bool = False,
    ) -> None:
        """Creates the appropriate contexts and persists events. The events
        should not depend on one another, e.g. this should be used to persist
        a bunch of outliers, but not a chunk of individual events that depend
        on each other for state calculations.
        Notifies about the events where appropriate.
        """
        async def prep(ev_info: _NewEventInfo):
            # Compute the event context (running auth checks) inside its own
            # logging context so log lines are attributable to the event.
            event = ev_info.event
            with nested_logging_context(suffix=event.event_id):
                res = await self._prep_event(
                    origin,
                    event,
                    state=ev_info.state,
                    auth_events=ev_info.auth_events,
                    backfilled=backfilled,
                )
            return res
        # Prepare all the events in parallel; this is safe because, per the
        # docstring, the events must not depend on one another.
        contexts = await make_deferred_yieldable(
            defer.gatherResults(
                [run_in_background(prep, ev_info) for ev_info in event_infos],
                consumeErrors=True,
            )
        )
        await self.persist_events_and_notify(
            room_id,
            [
                (ev_info.event, context)
                for ev_info, context in zip(event_infos, contexts)
            ],
            backfilled=backfilled,
        )
    async def _persist_auth_tree(
        self,
        origin: str,
        room_id: str,
        auth_events: List[EventBase],
        state: List[EventBase],
        event: EventBase,
        room_version: RoomVersion,
    ) -> int:
        """Checks the auth chain is valid (and passes auth checks) for the
        state and event. Then persists the auth chain and state atomically.
        Persists the event separately. Notifies about the persisted events
        where appropriate.
        Will attempt to fetch missing auth events.
        Args:
            origin: Where the events came from
            room_id,
            auth_events
            state
            event
            room_version: The room version we expect this room to have, and
                will raise if it doesn't match the version in the create event.
        Returns:
            The stream id at which `event` was persisted.
        """
        # The auth chain and state are persisted as outliers; compute a
        # context for each up front.
        events_to_context = {}
        for e in itertools.chain(auth_events, state):
            e.internal_metadata.outlier = True
            ctx = await self.state_handler.compute_event_context(e)
            events_to_context[e.event_id] = ctx
        event_map = {
            e.event_id: e for e in itertools.chain(auth_events, state, [event])
        }
        create_event = None
        for e in auth_events:
            if (e.type, e.state_key) == (EventTypes.Create, ""):
                create_event = e
                break
        if create_event is None:
            # If the state doesn't have a create event then the room is
            # invalid, and it would fail auth checks anyway.
            raise SynapseError(400, "No create event in state")
        # Verify that the room version in the create event matches the one we
        # were told to expect; a mismatch means someone lied about the version.
        room_version_id = create_event.content.get(
            "room_version", RoomVersions.V1.identifier
        )
        if room_version.identifier != room_version_id:
            raise SynapseError(400, "Room version mismatch")
        # Collect auth events that are referenced but were not supplied, and
        # try to fetch each one from the origin server.
        missing_auth_events = set()
        for e in itertools.chain(auth_events, state, [event]):
            for e_id in e.auth_event_ids():
                if e_id not in event_map:
                    missing_auth_events.add(e_id)
        for e_id in missing_auth_events:
            m_ev = await self.federation_client.get_pdu(
                [origin], e_id, room_version=room_version, outlier=True, timeout=10000,
            )
            if m_ev and m_ev.event_id == e_id:
                event_map[e_id] = m_ev
            else:
                logger.info("Failed to find auth event %r", e_id)
        # Run auth checks over every event; events that fail are persisted
        # with a rejection reason rather than dropped (except `event` itself,
        # whose failure aborts the whole operation).
        for e in itertools.chain(auth_events, state, [event]):
            auth_for_e = {
                (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
                for e_id in e.auth_event_ids()
                if e_id in event_map
            }
            if create_event:
                auth_for_e[(EventTypes.Create, "")] = create_event
            try:
                event_auth.check(room_version, e, auth_events=auth_for_e)
            except SynapseError as err:
                # we may get SynapseErrors here as well as AuthErrors. For
                # instance, there are a couple of (ancient) events in some
                # rooms whose senders do not have the correct sigil; these
                # cause SynapseErrors in auth.check. We don't want to give up
                # the attempt to federate altogether in such cases.
                logger.warning("Rejecting %s because %s", e.event_id, err.msg)
                if e == event:
                    raise
                events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
        await self.persist_events_and_notify(
            room_id,
            [
                (e, events_to_context[e.event_id])
                for e in itertools.chain(auth_events, state)
            ],
        )
        # Persist `event` separately, with the given state as its old_state.
        new_event_context = await self.state_handler.compute_event_context(
            event, old_state=state
        )
        return await self.persist_events_and_notify(
            room_id, [(event, new_event_context)]
        )
    async def _prep_event(
        self,
        origin: str,
        event: EventBase,
        state: Optional[Iterable[EventBase]],
        auth_events: Optional[MutableStateMap[EventBase]],
        backfilled: bool,
    ) -> EventContext:
        """Compute the event context for `event` and run auth checks on it.

        Args:
            origin: server that sent us the event.
            event: the event itself.
            state: state at the event, if we don't have all its prev_events.
            auth_events: claimed auth events, if already known; otherwise
                computed from the state before the event.
            backfilled: whether the event came from backfill.

        Returns:
            The (possibly rejected) context for the event.
        """
        context = await self.state_handler.compute_event_context(event, old_state=state)
        if not auth_events:
            # Work out which auth events *we* think the event should have,
            # based on the state before the event.
            prev_state_ids = await context.get_prev_state_ids()
            auth_events_ids = self.auth.compute_auth_events(
                event, prev_state_ids, for_verification=True
            )
            auth_events_x = await self.store.get_events(auth_events_ids)
            auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}
        # This is a hack to fix some old rooms where the initial join event
        # didn't reference the create event in its auth events.
        if event.type == EventTypes.Member and not event.auth_event_ids():
            if len(event.prev_event_ids()) == 1 and event.depth < 5:
                c = await self.store.get_event(
                    event.prev_event_ids()[0], allow_none=True
                )
                if c and c.type == EventTypes.Create:
                    auth_events[(c.type, c.state_key)] = c
        context = await self.do_auth(origin, event, context, auth_events=auth_events)
        if not context.rejected:
            await self._check_for_soft_fail(event, state, backfilled)
        if event.type == EventTypes.GuestAccess and not context.rejected:
            await self.maybe_kick_guest_users(event)
        return context
    async def _check_for_soft_fail(
        self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool
    ) -> None:
        """Checks if we should soft fail the event; if so, marks the event as
        such.
        Args:
            event
            state: The state at the event if we don't have all the event's prev events
            backfilled: Whether the event is from backfill
        """
        # For new (non-backfilled and non-outlier) events we check if the event
        # passes auth based on the current state. If it doesn't then we
        # "soft-fail" the event.
        if backfilled or event.internal_metadata.is_outlier():
            return
        extrem_ids_list = await self.store.get_latest_event_ids_in_room(event.room_id)
        extrem_ids = set(extrem_ids_list)
        prev_event_ids = set(event.prev_event_ids())
        if extrem_ids == prev_event_ids:
            # If they're the same then the current state is the same as the
            # state at the event, so no point rechecking auth for soft fail.
            return
        room_version = await self.store.get_room_version_id(event.room_id)
        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
        # Calculate the "current state".
        if state is not None:
            # If we're explicitly given the state then we won't have all the
            # prev events, and so we have a gap in the graph. In this case
            # we want to be a little careful as we might have been down for
            # a while and have an incorrect view of the current state,
            # however we still want to do checks as gaps are easy to
            # maliciously manufacture.
            #
            # So we use a "current state" that is actually a state
            # resolution across the current forward extremities and the
            # given state at the event. This should correctly handle cases
            # like bans, especially with state res v2.
            state_sets_d = await self.state_store.get_state_groups(
                event.room_id, extrem_ids
            )
            state_sets = list(state_sets_d.values())  # type: List[Iterable[EventBase]]
            state_sets.append(state)
            current_states = await self.state_handler.resolve_events(
                room_version, state_sets, event
            )
            current_state_ids = {
                k: e.event_id for k, e in current_states.items()
            }  # type: StateMap[str]
        else:
            current_state_ids = await self.state_handler.get_current_state_ids(
                event.room_id, latest_event_ids=extrem_ids
            )
        logger.debug(
            "Doing soft-fail check for %s: state %s", event.event_id, current_state_ids,
        )
        # Now check if event pass auth against said current state
        auth_types = auth_types_for_event(event)
        current_state_ids_list = [
            e for k, e in current_state_ids.items() if k in auth_types
        ]
        auth_events_map = await self.store.get_events(current_state_ids_list)
        current_auth_events = {
            (e.type, e.state_key): e for e in auth_events_map.values()
        }
        try:
            event_auth.check(room_version_obj, event, auth_events=current_auth_events)
        except AuthError as e:
            # Soft-failed events are persisted but hidden from clients and
            # excluded from forward extremities.
            logger.warning("Soft-failing %r because %s", event, e)
            event.internal_metadata.soft_failed = True
async def on_query_auth(
self, origin, event_id, room_id, remote_auth_chain, rejects, missing
):
in_room = await self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
event = await self.store.get_event(event_id, check_room_id=room_id)
# Just go through and process each event in `remote_auth_chain`. We
# don't want to fall into the trap of `missing` being wrong.
for e in remote_auth_chain:
try:
await self._handle_new_event(origin, e)
except AuthError:
pass
# Now get the current auth_chain for the event.
local_auth_chain = await self.store.get_auth_chain(
list(event.auth_event_ids()), include_given=True
)
# TODO: Check if we would now reject event_id. If so we need to tell
# everyone.
ret = await self.construct_auth_difference(local_auth_chain, remote_auth_chain)
logger.debug("on_query_auth returning: %s", ret)
return ret
async def on_get_missing_events(
self, origin, room_id, earliest_events, latest_events, limit
):
in_room = await self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
# Only allow up to 20 events to be retrieved per request.
limit = min(limit, 20)
missing_events = await self.store.get_missing_events(
room_id=room_id,
earliest_events=earliest_events,
latest_events=latest_events,
limit=limit,
)
missing_events = await filter_events_for_server(
self.storage, origin, missing_events
)
return missing_events
    async def do_auth(
        self,
        origin: str,
        event: EventBase,
        context: EventContext,
        auth_events: MutableStateMap[EventBase],
    ) -> EventContext:
        """
        Args:
            origin:
            event:
            context:
            auth_events:
                Map from (event_type, state_key) to event
                Normally, our calculated auth_events based on the state of the room
                at the event's position in the DAG, though occasionally (eg if the
                event is an outlier), may be the auth events claimed by the remote
                server.
                Also NB that this function adds entries to it.
        Returns:
            updated context object (with `rejected` set if the event fails
            auth checks).
        """
        room_version = await self.store.get_room_version_id(event.room_id)
        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
        try:
            # Try to reconcile our auth events with the remote's first; this
            # is best-effort and must not block processing.
            context = await self._update_auth_events_and_context_for_auth(
                origin, event, context, auth_events
            )
        except Exception:
            # We don't really mind if the above fails, so lets not fail
            # processing if it does. However, it really shouldn't fail so
            # let's still log as an exception since we'll still want to fix
            # any bugs.
            logger.exception(
                "Failed to double check auth events for %s with remote. "
                "Ignoring failure and continuing processing of event.",
                event.event_id,
            )
        try:
            event_auth.check(room_version_obj, event, auth_events=auth_events)
        except AuthError as e:
            # Mark the event as rejected rather than raising: the caller
            # persists rejected events with this reason attached.
            logger.warning("Failed auth resolution for %r because %s", event, e)
            context.rejected = RejectedReason.AUTH_ERROR
        return context
    async def _update_auth_events_and_context_for_auth(
        self,
        origin: str,
        event: EventBase,
        context: EventContext,
        auth_events: MutableStateMap[EventBase],
    ) -> EventContext:
        """Helper for do_auth. See there for docs.
        Checks whether a given event has the expected auth events. If it
        doesn't then we talk to the remote server to compare state to see if
        we can come to a consensus (e.g. if one server missed some valid
        state).
        This attempts to resolve any potential divergence of state between
        servers, but is not essential and so failures should not block further
        processing of the event.
        Args:
            origin:
            event:
            context:
            auth_events:
                Map from (event_type, state_key) to event
                Normally, our calculated auth_events based on the state of the room
                at the event's position in the DAG, though occasionally (eg if the
                event is an outlier), may be the auth events claimed by the remote
                server.
                Also NB that this function adds entries to it.
        Returns:
            updated context
        """
        event_auth_events = set(event.auth_event_ids())
        # missing_auth is the set of the event's auth_events which we don't yet have
        # in auth_events.
        missing_auth = event_auth_events.difference(
            e.event_id for e in auth_events.values()
        )
        # if we have missing events, we need to fetch those events from somewhere.
        #
        # we start by checking if they are in the store, and then try calling /event_auth/.
        if missing_auth:
            have_events = await self.store.have_seen_events(missing_auth)
            logger.debug("Events %s are in the store", have_events)
            missing_auth.difference_update(have_events)
        if missing_auth:
            # If we don't have all the auth events, we need to get them.
            logger.info("auth_events contains unknown events: %s", missing_auth)
            try:
                try:
                    remote_auth_chain = await self.federation_client.get_event_auth(
                        origin, event.room_id, event.event_id
                    )
                except RequestSendFailed as e1:
                    # The other side isn't around or doesn't implement the
                    # endpoint, so lets just bail out.
                    logger.info("Failed to get event auth from remote: %s", e1)
                    return context
                seen_remotes = await self.store.have_seen_events(
                    [e.event_id for e in remote_auth_chain]
                )
                # Process each previously-unseen event in the remote chain as
                # an outlier, using the chain itself as its auth events.
                for e in remote_auth_chain:
                    if e.event_id in seen_remotes:
                        continue
                    if e.event_id == event.event_id:
                        continue
                    try:
                        auth_ids = e.auth_event_ids()
                        auth = {
                            (e.type, e.state_key): e
                            for e in remote_auth_chain
                            if e.event_id in auth_ids or e.type == EventTypes.Create
                        }
                        e.internal_metadata.outlier = True
                        logger.debug(
                            "do_auth %s missing_auth: %s", event.event_id, e.event_id
                        )
                        await self._handle_new_event(origin, e, auth_events=auth)
                        if e.event_id in event_auth_events:
                            auth_events[(e.type, e.state_key)] = e
                    except AuthError:
                        pass
            except Exception:
                logger.exception("Failed to get auth chain")
        if event.internal_metadata.is_outlier():
            # XXX: given that, for an outlier, we'll be working with the
            # event's *claimed* auth events rather than those we calculated:
            # (a) is there any point in this test, since different_auth below will
            # obviously be empty
            # (b) alternatively, why don't we do it earlier?
            logger.info("Skipping auth_event fetch for outlier")
            return context
        different_auth = event_auth_events.difference(
            e.event_id for e in auth_events.values()
        )
        if not different_auth:
            return context
        logger.info(
            "auth_events refers to events which are not in our calculated auth "
            "chain: %s",
            different_auth,
        )
        # XXX: currently this checks for redactions but I'm not convinced that is
        # necessary?
        different_events = await self.store.get_events_as_list(different_auth)
        for d in different_events:
            if d.room_id != event.room_id:
                logger.warning(
                    "Event %s refers to auth_event %s which is in a different room",
                    event.event_id,
                    d.event_id,
                )
                # don't attempt to resolve the claimed auth events against our own
                # in this case: just use our own auth events.
                #
                # XXX: should we reject the event in this case? It feels like we should,
                # but then shouldn't we also do so if we've failed to fetch any of the
                # auth events?
                return context
        # now we state-resolve between our own idea of the auth events, and the remote's
        # idea of them.
        local_state = auth_events.values()
        remote_auth_events = dict(auth_events)
        remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
        remote_state = remote_auth_events.values()
        room_version = await self.store.get_room_version_id(event.room_id)
        new_state = await self.state_handler.resolve_events(
            room_version, (local_state, remote_state), event
        )
        logger.info(
            "After state res: updating auth_events with new state %s",
            {
                (d.type, d.state_key): d.event_id
                for d in new_state.values()
                if auth_events.get((d.type, d.state_key)) != d
            },
        )
        auth_events.update(new_state)
        context = await self._update_context_for_auth_events(
            event, context, auth_events
        )
        return context
async def _update_context_for_auth_events(
self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
) -> EventContext:
"""Update the state_ids in an event context after auth event resolution,
storing the changes as a new state group.
Args:
event: The event we're handling the context for
context: initial event context
auth_events: Events to update in the event context.
Returns:
new event context
"""
# exclude the state key of the new event from the current_state in the context.
if event.is_state():
event_key = (event.type, event.state_key) # type: Optional[Tuple[str, str]]
else:
event_key = None
state_updates = {
k: a.event_id for k, a in auth_events.items() if k != event_key
}
current_state_ids = await context.get_current_state_ids()
current_state_ids = dict(current_state_ids) # type: ignore
current_state_ids.update(state_updates)
prev_state_ids = await context.get_prev_state_ids()
prev_state_ids = dict(prev_state_ids)
prev_state_ids.update({k: a.event_id for k, a in auth_events.items()})
# create a new state group as a delta from the existing one.
prev_group = context.state_group
state_group = await self.state_store.store_state_group(
event.event_id,
event.room_id,
prev_group=prev_group,
delta_ids=state_updates,
current_state_ids=current_state_ids,
)
return EventContext.with_state(
state_group=state_group,
state_group_before_event=context.state_group_before_event,
current_state_ids=current_state_ids,
prev_state_ids=prev_state_ids,
prev_group=prev_group,
delta_ids=state_updates,
)
async def construct_auth_difference(
self, local_auth: Iterable[EventBase], remote_auth: Iterable[EventBase]
) -> Dict:
""" Given a local and remote auth chain, find the differences. This
assumes that we have already processed all events in remote_auth
Params:
local_auth (list)
remote_auth (list)
Returns:
dict
"""
logger.debug("construct_auth_difference Start!")
# TODO: Make sure we are OK with local_auth or remote_auth having more
# auth events in them than strictly necessary.
def sort_fun(ev):
return ev.depth, ev.event_id
logger.debug("construct_auth_difference after sort_fun!")
# We find the differences by starting at the "bottom" of each list
# and iterating up on both lists. The lists are ordered by depth and
# then event_id, we iterate up both lists until we find the event ids
# don't match. Then we look at depth/event_id to see which side is
# missing that event, and iterate only up that list. Repeat.
remote_list = list(remote_auth)
remote_list.sort(key=sort_fun)
local_list = list(local_auth)
local_list.sort(key=sort_fun)
local_iter = iter(local_list)
remote_iter = iter(remote_list)
logger.debug("construct_auth_difference before get_next!")
def get_next(it, opt=None):
try:
return next(it)
except Exception:
return opt
current_local = get_next(local_iter)
current_remote = get_next(remote_iter)
logger.debug("construct_auth_difference before while")
missing_remotes = []
missing_locals = []
while current_local or current_remote:
if current_remote is None:
missing_locals.append(current_local)
current_local = get_next(local_iter)
continue
if current_local is None:
missing_remotes.append(current_remote)
current_remote = get_next(remote_iter)
continue
if current_local.event_id == current_remote.event_id:
current_local = get_next(local_iter)
current_remote = get_next(remote_iter)
continue
if current_local.depth < current_remote.depth:
missing_locals.append(current_local)
current_local = get_next(local_iter)
continue
if current_local.depth > current_remote.depth:
missing_remotes.append(current_remote)
current_remote = get_next(remote_iter)
continue
# They have the same depth, so we fall back to the event_id order
if current_local.event_id < current_remote.event_id:
missing_locals.append(current_local)
current_local = get_next(local_iter)
if current_local.event_id > current_remote.event_id:
missing_remotes.append(current_remote)
current_remote = get_next(remote_iter)
continue
logger.debug("construct_auth_difference after while")
# missing locals should be sent to the server
# We should find why we are missing remotes, as they will have been
# rejected.
# Remove events from missing_remotes if they are referencing a missing
# remote. We only care about the "root" rejected ones.
missing_remote_ids = [e.event_id for e in missing_remotes]
base_remote_rejected = list(missing_remotes)
for e in missing_remotes:
for e_id in e.auth_event_ids():
if e_id in missing_remote_ids:
try:
base_remote_rejected.remove(e)
except ValueError:
pass
reason_map = {}
for e in base_remote_rejected:
reason = await self.store.get_rejection_reason(e.event_id)
if reason is None:
# TODO: e is not in the current state, so we should
# construct some proof of that.
continue
reason_map[e.event_id] = reason
logger.debug("construct_auth_difference returning")
return {
"auth_chain": local_auth,
"rejects": {
e.event_id: {"reason": reason_map[e.event_id], "proof": None}
for e in base_remote_rejected
},
"missing": [e.event_id for e in missing_locals],
}
    @log_function
    async def exchange_third_party_invite(
        self, sender_user_id, target_user_id, room_id, signed
    ):
        """Turn a 3pid invite into a full m.room.member invite event.

        If this server is in the room, we build, validate, signature-check and
        send the membership event ourselves; otherwise we forward the request
        to a remote server derived from the sender/room domains.

        Args:
            sender_user_id (str): user on whose behalf the invite is sent.
            target_user_id (str): user being invited (becomes the state_key).
            room_id (str): room the invite relates to.
            signed (dict): signed block placed under
                content.third_party_invite.signed -- presumably produced by an
                identity server; confirm with callers.
        """
        third_party_invite = {"signed": signed}

        event_dict = {
            "type": EventTypes.Member,
            "content": {
                "membership": Membership.INVITE,
                "third_party_invite": third_party_invite,
            },
            "room_id": room_id,
            "sender": sender_user_id,
            "state_key": target_user_id,
        }

        if await self.auth.check_host_in_room(room_id, self.hs.hostname):
            # We are in the room: create and send the event locally.
            room_version = await self.store.get_room_version_id(room_id)
            builder = self.event_builder_factory.new(room_version, event_dict)

            EventValidator().validate_builder(builder)
            event, context = await self.event_creation_handler.create_new_client_event(
                builder=builder
            )

            # Rebuilds the event with the display_name from the original
            # m.room.third_party_invite, if one can be found.
            event, context = await self.add_display_name_to_third_party_invite(
                room_version, event_dict, event, context
            )

            EventValidator().validate_new(event, self.config)

            # We need to tell the transaction queue to send this out, even
            # though the sender isn't a local user.
            event.internal_metadata.send_on_behalf_of = self.hs.hostname

            try:
                await self.auth.check_from_context(room_version, event, context)
            except AuthError as e:
                logger.warning("Denying new third party invite %r because %s", event, e)
                raise e

            # Check the signed block against the keys in the original invite.
            await self._check_signature(event, context)

            # We retrieve the room member handler here as to not cause a cyclic dependency
            member_handler = self.hs.get_room_member_handler()
            await member_handler.send_membership_event(None, event, context)
        else:
            # Not in the room: ask a server that (hopefully) is to do the
            # exchange for us, using the domains of the sender and the room.
            destinations = {x.split(":", 1)[-1] for x in (sender_user_id, room_id)}
            await self.federation_client.forward_third_party_invite(
                destinations, room_id, event_dict
            )
    async def on_exchange_third_party_invite_request(
        self, event_dict: JsonDict
    ) -> None:
        """Handle an exchange_third_party_invite request from a remote server

        The remote server will call this when it wants to turn a 3pid invite
        into a normal m.room.member invite.

        Args:
            event_dict: Dictionary containing the event body.

        Raises:
            AuthError: if the event fails auth checks, or its signatures do
                not match the original 3pid invite.
        """
        assert_params_in_dict(event_dict, ["room_id"])
        room_version = await self.store.get_room_version_id(event_dict["room_id"])

        # NB: event_dict has a particular specced format we might need to fudge
        # if we change event formats too much.
        builder = self.event_builder_factory.new(room_version, event_dict)

        event, context = await self.event_creation_handler.create_new_client_event(
            builder=builder
        )

        # Rebuilds the event with the display_name of the original invite.
        event, context = await self.add_display_name_to_third_party_invite(
            room_version, event_dict, event, context
        )

        try:
            await self.auth.check_from_context(room_version, event, context)
        except AuthError as e:
            logger.warning("Denying third party invite %r because %s", event, e)
            raise e

        # Verify the signed block against the keys published in the original
        # m.room.third_party_invite event.
        await self._check_signature(event, context)

        # We need to tell the transaction queue to send this out, even
        # though the sender isn't a local user.
        event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)

        # We retrieve the room member handler here as to not cause a cyclic dependency
        member_handler = self.hs.get_room_member_handler()
        await member_handler.send_membership_event(None, event, context)
async def add_display_name_to_third_party_invite(
self, room_version, event_dict, event, context
):
key = (
EventTypes.ThirdPartyInvite,
event.content["third_party_invite"]["signed"]["token"],
)
original_invite = None
prev_state_ids = await context.get_prev_state_ids()
original_invite_id = prev_state_ids.get(key)
if original_invite_id:
original_invite = await self.store.get_event(
original_invite_id, allow_none=True
)
if original_invite:
# If the m.room.third_party_invite event's content is empty, it means the
# invite has been revoked. In this case, we don't have to raise an error here
# because the auth check will fail on the invite (because it's not able to
# fetch public keys from the m.room.third_party_invite event's content, which
# is empty).
display_name = original_invite.content.get("display_name")
event_dict["content"]["third_party_invite"]["display_name"] = display_name
else:
logger.info(
"Could not find invite event for third_party_invite: %r", event_dict
)
# We don't discard here as this is not the appropriate place to do
# auth checks. If we need the invite and don't have it then the
# auth check code will explode appropriately.
builder = self.event_builder_factory.new(room_version, event_dict)
EventValidator().validate_builder(builder)
event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
)
EventValidator().validate_new(event, self.config)
return (event, context)
    async def _check_signature(self, event, context):
        """
        Checks that the signature in the event is consistent with its invite.

        Args:
            event (Event): The m.room.member event to check
            context (EventContext):

        Raises:
            AuthError: if signature didn't match any keys, or key has been
                revoked,
            SynapseError: if a transient error meant a key couldn't be checked
                for revocation.
        """
        signed = event.content["third_party_invite"]["signed"]
        token = signed["token"]

        # The token identifies which m.room.third_party_invite event this
        # membership event is redeeming.
        prev_state_ids = await context.get_prev_state_ids()
        invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token))
        invite_event = None
        if invite_event_id:
            invite_event = await self.store.get_event(invite_event_id, allow_none=True)
        if not invite_event:
            raise AuthError(403, "Could not find invite")
        logger.debug("Checking auth on event %r", event.content)
        # Any key that verifies successfully causes an early `return`; if
        # every key fails, we re-raise the last failure.
        last_exception = None  # type: Optional[Exception]
        # for each public key in the 3pid invite event
        for public_key_object in self.hs.get_auth().get_public_keys(invite_event):
            try:
                # for each sig on the third_party_invite block of the actual invite
                for server, signature_block in signed["signatures"].items():
                    for key_name, encoded_signature in signature_block.items():
                        # Only ed25519 signatures are understood.
                        if not key_name.startswith("ed25519:"):
                            continue
                        logger.debug(
                            "Attempting to verify sig with key %s from %r "
                            "against pubkey %r",
                            key_name,
                            server,
                            public_key_object,
                        )
                        try:
                            public_key = public_key_object["public_key"]
                            verify_key = decode_verify_key_bytes(
                                key_name, decode_base64(public_key)
                            )
                            # Raises SignatureVerifyException on mismatch,
                            # which the outer handler records.
                            verify_signed_json(signed, server, verify_key)
                            logger.debug(
                                "Successfully verified sig with key %s from %r "
                                "against pubkey %r",
                                key_name,
                                server,
                                public_key_object,
                            )
                        except Exception:
                            logger.info(
                                "Failed to verify sig with key %s from %r "
                                "against pubkey %r",
                                key_name,
                                server,
                                public_key_object,
                            )
                            raise
                    try:
                        # NOTE(review): `public_key` is the value bound in the
                        # inner loop above; if `signed["signatures"]` were
                        # empty this name would be unbound (NameError).
                        # Presumably signatures are always present by this
                        # point -- confirm.
                        if "key_validity_url" in public_key_object:
                            await self._check_key_revocation(
                                public_key, public_key_object["key_validity_url"]
                            )
                    except Exception:
                        logger.info(
                            "Failed to query key_validity_url %s",
                            public_key_object["key_validity_url"],
                        )
                        raise
                # Verification (and revocation check) succeeded for this key.
                return
            except Exception as e:
                last_exception = e
        if last_exception is None:
            # we can only get here if get_public_keys() returned an empty list
            # TODO: make this better
            raise RuntimeError("no public key in invite event")
        raise last_exception
async def _check_key_revocation(self, public_key, url):
"""
Checks whether public_key has been revoked.
Args:
public_key (str): base-64 encoded public key.
url (str): Key revocation URL.
Raises:
AuthError: if they key has been revoked.
SynapseError: if a transient error meant a key couldn't be checked
for revocation.
"""
try:
response = await self.http_client.get_json(url, {"public_key": public_key})
except Exception:
raise SynapseError(502, "Third party certificate could not be checked")
if "valid" not in response or not response["valid"]:
raise AuthError(403, "Third party certificate was invalid")
    async def persist_events_and_notify(
        self,
        room_id: str,
        event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
        backfilled: bool = False,
    ) -> int:
        """Persists events and tells the notifier/pushers about them, if
        necessary.

        Args:
            room_id: The room ID of events being persisted.
            event_and_contexts: Sequence of events with their associated
                context that should be persisted. All events must belong to
                the same room.
            backfilled: Whether these events are a result of
                backfilling or not

        Returns:
            The stream ID after which all events have been persisted.
        """
        instance = self.config.worker.events_shard_config.get_instance(room_id)
        if instance != self._instance_name:
            # This room is persisted by a different worker: forward the whole
            # batch over replication and return its max stream ID.
            result = await self._send_events(
                instance_name=instance,
                store=self.store,
                room_id=room_id,
                event_and_contexts=event_and_contexts,
                backfilled=backfilled,
            )
            return result["max_stream_id"]
        else:
            assert self.storage.persistence

            # Note that this returns the events that were persisted, which may not be
            # the same as were passed in if some were deduplicated due to transaction IDs.
            events, max_stream_token = await self.storage.persistence.persist_events(
                event_and_contexts, backfilled=backfilled
            )

            if self._ephemeral_messages_enabled:
                for event in events:
                    # If there's an expiry timestamp on the event, schedule its expiry.
                    self._message_handler.maybe_schedule_expiry(event)

            if not backfilled:  # Never notify for backfilled events
                for event in events:
                    await self._notify_persisted_event(event, max_stream_token)

            return max_stream_token.stream
async def _notify_persisted_event(
self, event: EventBase, max_stream_token: RoomStreamToken
) -> None:
"""Checks to see if notifier/pushers should be notified about the
event or not.
Args:
event:
max_stream_id: The max_stream_id returned by persist_events
"""
extra_users = []
if event.type == EventTypes.Member:
target_user_id = event.state_key
# We notify for memberships if its an invite for one of our
# users
if event.internal_metadata.is_outlier():
if event.membership != Membership.INVITE:
if not self.is_mine_id(target_user_id):
return
target_user = UserID.from_string(target_user_id)
extra_users.append(target_user)
elif event.internal_metadata.is_outlier():
return
# the event has been persisted so it should have a stream ordering.
assert event.internal_metadata.stream_ordering
event_pos = PersistedEventPosition(
self._instance_name, event.internal_metadata.stream_ordering
)
self.notifier.on_new_room_event(
event, event_pos, max_stream_token, extra_users=extra_users
)
async def _clean_room_for_join(self, room_id: str) -> None:
"""Called to clean up any data in DB for a given room, ready for the
server to join the room.
Args:
room_id
"""
if self.config.worker_app:
await self._clean_room_for_join_client(room_id)
else:
await self.store.clean_room_for_join(room_id)
async def get_room_complexity(
self, remote_room_hosts: List[str], room_id: str
) -> Optional[dict]:
"""
Fetch the complexity of a remote room over federation.
Args:
remote_room_hosts (list[str]): The remote servers to ask.
room_id (str): The room ID to ask about.
Returns:
Dict contains the complexity
metric versions, while None means we could not fetch the complexity.
"""
for host in remote_room_hosts:
res = await self.federation_client.get_room_complexity(host, room_id)
# We got a result, return it.
if res:
return res
# We fell off the bottom, couldn't get the complexity from anyone. Oh
# well.
return None
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_4373_3 |
crossvul-python_data_bad_4373_1 | # -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2019 Matrix.org Federation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
List,
Optional,
Tuple,
Union,
)
from prometheus_client import Counter, Gauge, Histogram
from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
AuthError,
Codes,
FederationError,
IncompatibleRoomVersionError,
NotFoundError,
SynapseError,
UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.endpoint import parse_server_name
from synapse.logging.context import (
make_deferred_yieldable,
nested_logging_context,
run_in_background,
)
from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
ReplicationGetQueryRestServlet,
)
from synapse.types import JsonDict, get_domain_from_id
from synapse.util import glob_to_regex, json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
if TYPE_CHECKING:
from synapse.server import HomeServer
# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.
TRANSACTION_CONCURRENCY_LIMIT = 10

logger = logging.getLogger(__name__)

# Prometheus counters for inbound federation traffic.
received_pdus_counter = Counter("synapse_federation_server_received_pdus", "")
received_edus_counter = Counter("synapse_federation_server_received_edus", "")
received_queries_counter = Counter(
    "synapse_federation_server_received_queries", "", ["type"]
)

# Time taken to handle a single inbound PDU.
pdu_process_time = Histogram(
    "synapse_federation_server_pdu_process_time", "Time taken to process an event",
)

# Updated only for origins listed in federation_metrics_domains (see
# FederationServer._handle_pdus_in_txn).
last_pdu_age_metric = Gauge(
    "synapse_federation_last_received_pdu_age",
    "The age (in seconds) of the last PDU successfully received from the given domain",
    labelnames=("server_name",),
)
class FederationServer(FederationBase):
    def __init__(self, hs):
        """Set up the handlers, linearizers and response caches used to serve
        inbound federation requests.
        """
        super().__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_federation_handler()
        self.state = hs.get_state_handler()
        self.device_handler = hs.get_device_handler()

        # Ensure the following handlers are loaded since they register callbacks
        # with FederationHandlerRegistry.
        hs.get_directory_handler()

        self._federation_ratelimiter = hs.get_federation_ratelimiter()

        # Serialise expensive per-(origin, room) and per-transaction work.
        self._server_linearizer = Linearizer("fed_server")
        self._transaction_linearizer = Linearizer("fed_txn_handler")

        # We cache results for transaction with the same ID
        self._transaction_resp_cache = ResponseCache(
            hs, "fed_txn_handler", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        self.transaction_actions = TransactionActions(self.store)

        self.registry = hs.get_federation_registry()

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(
            hs, "state_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]
        self._state_ids_resp_cache = ResponseCache(
            hs, "state_ids_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        # Origins for which we export per-domain federation metrics.
        self._federation_metrics_domains = (
            hs.get_config().federation.federation_metrics_domains
        )
async def on_backfill_request(
self, origin: str, room_id: str, versions: List[str], limit: int
) -> Tuple[int, Dict[str, Any]]:
with (await self._server_linearizer.queue((origin, room_id))):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
pdus = await self.handler.on_backfill_request(
origin, room_id, versions, limit
)
res = self._transaction_from_pdus(pdus).get_dict()
return 200, res
    async def on_incoming_transaction(
        self, origin: str, transaction_data: JsonDict
    ) -> Tuple[int, Dict[str, Any]]:
        """Entry point for a federation transaction received from `origin`.

        Args:
            origin: the server making the request
            transaction_data: raw transaction body

        Returns:
            HTTP response code and body
        """
        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.
        request_time = self._clock.time_msec()

        transaction = Transaction(**transaction_data)
        transaction_id = transaction.transaction_id  # type: ignore

        if not transaction_id:
            raise Exception("Transaction missing transaction_id")

        logger.debug("[%s] Got transaction", transaction_id)

        # We wrap in a ResponseCache so that we de-duplicate retried
        # transactions.
        return await self._transaction_resp_cache.wrap(
            (origin, transaction_id),
            self._on_incoming_transaction_inner,
            origin,
            transaction,
            request_time,
        )
    async def _on_incoming_transaction_inner(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        """Serialise and rate-limit the processing of a transaction.

        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at

        Returns:
            HTTP response code and body
        """
        # Use a linearizer to ensure that transactions from a remote are
        # processed in order.
        with await self._transaction_linearizer.queue(origin):
            # We rate limit here *after* we've queued up the incoming requests,
            # so that we don't fill up the ratelimiter with blocked requests.
            #
            # This is important as the ratelimiter allows N concurrent requests
            # at a time, and only starts ratelimiting if there are more requests
            # than that being processed at a time. If we queued up requests in
            # the linearizer/response cache *after* the ratelimiting then those
            # queued up requests would count as part of the allowed limit of N
            # concurrent requests.
            with self._federation_ratelimiter.ratelimit(origin) as d:
                await d

                result = await self._handle_incoming_transaction(
                    origin, transaction, request_time
                )

        return result
    async def _handle_incoming_transaction(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        """ Process an incoming transaction and return the HTTP response

        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at

        Returns:
            HTTP response code and body
        """
        # Replay protection: if we've already answered this transaction, send
        # the stored response again.
        response = await self.transaction_actions.have_responded(origin, transaction)

        if response:
            logger.debug(
                "[%s] We've already responded to this request",
                transaction.transaction_id,  # type: ignore
            )
            return response

        logger.debug("[%s] Transaction is new", transaction.transaction_id)  # type: ignore

        # Reject if PDU count > 50 or EDU count > 100
        if len(transaction.pdus) > 50 or (  # type: ignore
            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
        ):

            logger.info("Transaction PDU or EDU count too large. Returning 400")

            response = {}
            await self.transaction_actions.set_response(
                origin, transaction, 400, response
            )
            return 400, response

        # We process PDUs and EDUs in parallel. This is important as we don't
        # want to block things like to device messages from reaching clients
        # behind the potentially expensive handling of PDUs.
        pdu_results, _ = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self._handle_pdus_in_txn, origin, transaction, request_time
                    ),
                    run_in_background(self._handle_edus_in_txn, origin, transaction),
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        response = {"pdus": pdu_results}

        logger.debug("Returning: %s", str(response))

        # Record the response so a retried transaction gets the same answer.
        await self.transaction_actions.set_response(origin, transaction, 200, response)
        return 200, response
    async def _handle_pdus_in_txn(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Dict[str, dict]:
        """Process the PDUs in a received transaction.

        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at

        Returns:
            A map from event ID of a processed PDU to any errors we should
            report back to the sending server.
        """

        received_pdus_counter.inc(len(transaction.pdus))  # type: ignore

        origin_host, _ = parse_server_name(origin)

        # Group the PDUs by room so each room can be processed independently.
        pdus_by_room = {}  # type: Dict[str, List[EventBase]]

        newest_pdu_ts = 0

        for p in transaction.pdus:  # type: ignore
            # FIXME (richardv): I don't think this works:
            # https://github.com/matrix-org/synapse/issues/8429
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                # Convert the relative "age" into an absolute origin timestamp.
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            # We try and pull out an event ID so that if later checks fail we
            # can log something sensible. We don't mandate an event ID here in
            # case future event formats get rid of the key.
            possible_event_id = p.get("event_id", "<Unknown>")

            # Now we get the room ID so that we can check that we know the
            # version of the room.
            room_id = p.get("room_id")
            if not room_id:
                logger.info(
                    "Ignoring PDU as does not have a room_id. Event ID: %s",
                    possible_event_id,
                )
                continue

            try:
                room_version = await self.store.get_room_version(room_id)
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue
            except UnsupportedRoomVersionError as e:
                # this can happen if support for a given room version is withdrawn,
                # so that we still get events for said room.
                logger.info("Ignoring PDU: %s", e)
                continue

            event = event_from_pdu_json(p, room_version)
            pdus_by_room.setdefault(room_id, []).append(event)

            # Track the newest origin timestamp for the per-domain age metric.
            if event.origin_server_ts > newest_pdu_ts:
                newest_pdu_ts = event.origin_server_ts

        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.

        async def process_pdus_for_room(room_id: str):
            # Handle every PDU for one room, recording a per-event result.
            logger.debug("Processing PDUs for %s", room_id)
            try:
                await self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                # The room's ACL bans this origin: fail every PDU for the room.
                logger.warning("Ignoring PDUs for room %s from banned server", room_id)
                for pdu in pdus_by_room[room_id]:
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                with pdu_process_time.time():
                    with nested_logging_context(event_id):
                        try:
                            await self._handle_received_pdu(origin, pdu)
                            pdu_results[event_id] = {}
                        except FederationError as e:
                            logger.warning("Error handling PDU %s: %s", event_id, e)
                            pdu_results[event_id] = {"error": str(e)}
                        except Exception as e:
                            f = failure.Failure()
                            pdu_results[event_id] = {"error": str(e)}
                            logger.error(
                                "Failed to handle PDU %s",
                                event_id,
                                exc_info=(f.type, f.value, f.getTracebackObject()),
                            )

        await concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
        )

        if newest_pdu_ts and origin in self._federation_metrics_domains:
            # Export how stale the newest PDU from this monitored origin is.
            newest_pdu_age = self._clock.time_msec() - newest_pdu_ts
            last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000)

        return pdu_results
async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
"""Process the EDUs in a received transaction.
"""
async def _process_edu(edu_dict):
received_edus_counter.inc()
edu = Edu(
origin=origin,
destination=self.server_name,
edu_type=edu_dict["edu_type"],
content=edu_dict["content"],
)
await self.registry.on_edu(edu.edu_type, origin, edu.content)
await concurrently_execute(
_process_edu,
getattr(transaction, "edus", []),
TRANSACTION_CONCURRENCY_LIMIT,
)
    async def on_context_state_request(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        """Return the room state (and auth chain) at `event_id`, as full PDUs.

        Args:
            origin: the server making the request
            room_id: room being queried
            event_id: event at which to snapshot the state

        Raises:
            AuthError: if the requesting host is not in the room.
        """
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        # we grab the linearizer to protect ourselves from servers which hammer
        # us. In theory we might already have the response to this query
        # in the cache so we could return it without waiting for the linearizer
        # - but that's non-trivial to get right, and anyway somewhat defeats
        # the point of the linearizer.
        with (await self._server_linearizer.queue((origin, room_id))):
            # Copy the cached dict so adding room_version below does not
            # mutate the cached value.
            resp = dict(
                await self._state_resp_cache.wrap(
                    (room_id, event_id),
                    self._on_context_state_request_compute,
                    room_id,
                    event_id,
                )
            )

        room_version = await self.store.get_room_version_id(room_id)
        resp["room_version"] = room_version

        return 200, resp
async def on_state_ids_request(
self, origin: str, room_id: str, event_id: str
) -> Tuple[int, Dict[str, Any]]:
if not event_id:
raise NotImplementedError("Specify an event")
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
in_room = await self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
resp = await self._state_ids_resp_cache.wrap(
(room_id, event_id), self._on_state_ids_request_compute, room_id, event_id,
)
return 200, resp
async def _on_state_ids_request_compute(self, room_id, event_id):
state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
async def _on_context_state_request_compute(
self, room_id: str, event_id: str
) -> Dict[str, list]:
if event_id:
pdus = await self.handler.get_state_for_pdu(room_id, event_id)
else:
pdus = (await self.state.get_current_state(room_id)).values()
auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
return {
"pdus": [pdu.get_pdu_json() for pdu in pdus],
"auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
}
async def on_pdu_request(
self, origin: str, event_id: str
) -> Tuple[int, Union[JsonDict, str]]:
pdu = await self.handler.get_persisted_pdu(origin, event_id)
if pdu:
return 200, self._transaction_from_pdus([pdu]).get_dict()
else:
return 404, ""
async def on_query_request(
self, query_type: str, args: Dict[str, str]
) -> Tuple[int, Dict[str, Any]]:
received_queries_counter.labels(query_type).inc()
resp = await self.registry.on_query(query_type, args)
return 200, resp
async def on_make_join_request(
self, origin: str, room_id: str, user_id: str, supported_versions: List[str]
) -> Dict[str, Any]:
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
room_version = await self.store.get_room_version_id(room_id)
if room_version not in supported_versions:
logger.warning(
"Room version %s not in %s", room_version, supported_versions
)
raise IncompatibleRoomVersionError(room_version=room_version)
pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
async def on_invite_request(
self, origin: str, content: JsonDict, room_version_id: str
) -> Dict[str, Any]:
room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
if not room_version:
raise SynapseError(
400,
"Homeserver does not support this room version",
Codes.UNSUPPORTED_ROOM_VERSION,
)
pdu = event_from_pdu_json(content, room_version)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
pdu = await self._check_sigs_and_hash(room_version, pdu)
ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
time_now = self._clock.time_msec()
return {"event": ret_pdu.get_pdu_json(time_now)}
async def on_send_join_request(
self, origin: str, content: JsonDict, room_id: str
) -> Dict[str, Any]:
logger.debug("on_send_join_request: content: %s", content)
room_version = await self.store.get_room_version(room_id)
pdu = event_from_pdu_json(content, room_version)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
pdu = await self._check_sigs_and_hash(room_version, pdu)
res_pdus = await self.handler.on_send_join_request(origin, pdu)
time_now = self._clock.time_msec()
return {
"state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
"auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
}
async def on_make_leave_request(
    self, origin: str, room_id: str, user_id: str
) -> Dict[str, Any]:
    """Handle a federation make_leave request: build a leave-event template.

    Args:
        origin: server name of the requesting server.
        room_id: the room being left.
        user_id: the user who is leaving.

    Returns:
        A dict with the unsigned leave "event" template and the "room_version".
    """
    origin_host, _ = parse_server_name(origin)
    await self.check_server_matches_acl(origin_host, room_id)
    pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)

    room_version = await self.store.get_room_version_id(room_id)

    time_now = self._clock.time_msec()
    return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
async def on_send_leave_request(
    self, origin: str, content: JsonDict, room_id: str
) -> dict:
    """Handle a federation send_leave request.

    Args:
        origin: server name of the leaving server (already authenticated).
        content: the leave membership event, as a JSON PDU.
        room_id: the room being left.

    Returns:
        An empty dict (the response body required by the federation API).
    """
    logger.debug("on_send_leave_request: content: %s", content)

    room_version = await self.store.get_room_version(room_id)
    pdu = event_from_pdu_json(content, room_version)

    origin_host, _ = parse_server_name(origin)
    # Enforce the room's server ACLs before processing the leave.
    await self.check_server_matches_acl(origin_host, pdu.room_id)

    logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)

    # May return a redacted copy of the event if the content hash is wrong.
    pdu = await self._check_sigs_and_hash(room_version, pdu)

    await self.handler.on_send_leave_request(origin, pdu)
    return {}
async def on_event_auth(
    self, origin: str, room_id: str, event_id: str
) -> Tuple[int, Dict[str, Any]]:
    """Return the auth chain for a given event to a remote server.

    Args:
        origin: server name of the requesting server.
        room_id: the room the event belongs to.
        event_id: the event whose auth chain is requested.

    Returns:
        An HTTP status code and a dict with the "auth_chain" as JSON PDUs.
    """
    # Serialise concurrent requests from the same origin for the same room.
    with (await self._server_linearizer.queue((origin, room_id))):
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        time_now = self._clock.time_msec()
        auth_pdus = await self.handler.on_event_auth(event_id)
        res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
    return 200, res
@log_function
async def on_query_client_keys(
    self, origin: str, content: Dict[str, str]
) -> Tuple[int, Dict[str, Any]]:
    # Thin wrapper: client-key queries go through the generic query plumbing.
    return await self.on_query_request("client_keys", content)
async def on_query_user_devices(
    self, origin: str, user_id: str
) -> Tuple[int, Dict[str, Any]]:
    """Return the device list for a local user to a remote server."""
    keys = await self.device_handler.on_federation_query_user_devices(user_id)
    return 200, keys
@trace
async def on_claim_client_keys(
    self, origin: str, content: JsonDict
) -> Dict[str, Any]:
    """Claim end-to-end one-time keys on behalf of a remote server.

    Args:
        origin: server name of the requesting server.
        content: request body; "one_time_keys" maps
            user_id -> device_id -> requested algorithm.

    Returns:
        A dict with the claimed keys under "one_time_keys", keyed
        user_id -> device_id -> key_id -> key JSON.
    """
    # Flatten the nested request into (user, device, algorithm) triples
    # for the store.
    query = []
    for user_id, device_keys in content.get("one_time_keys", {}).items():
        for device_id, algorithm in device_keys.items():
            query.append((user_id, device_id, algorithm))

    log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
    results = await self.store.claim_e2e_one_time_keys(query)

    json_result = {}  # type: Dict[str, Dict[str, dict]]
    for user_id, device_keys in results.items():
        for device_id, keys in device_keys.items():
            for key_id, json_str in keys.items():
                # NOTE(review): if the store ever returned several key_ids
                # for one device, later assignments would replace earlier
                # ones here — presumably at most one key per device is
                # claimed per request; confirm against the store.
                json_result.setdefault(user_id, {})[device_id] = {
                    key_id: json_decoder.decode(json_str)
                }

    logger.info(
        "Claimed one-time-keys: %s",
        ",".join(
            (
                "%s for %s:%s" % (key_id, user_id, device_id)
                for user_id, user_keys in json_result.items()
                for device_id, device_keys in user_keys.items()
                for key_id, _ in device_keys.items()
            )
        ),
    )

    return {"one_time_keys": json_result}
async def on_get_missing_events(
    self,
    origin: str,
    room_id: str,
    earliest_events: List[str],
    latest_events: List[str],
    limit: int,
) -> Dict[str, list]:
    """Return events on the paths between latest_events and earliest_events.

    Args:
        origin: server name of the requesting server.
        room_id: the room to search in.
        earliest_events: event IDs the caller already has.
        latest_events: event IDs to work backwards from.
        limit: maximum number of events to return.

    Returns:
        A dict with the missing "events" as JSON PDUs.
    """
    # Serialise concurrent requests from the same origin for the same room.
    with (await self._server_linearizer.queue((origin, room_id))):
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        logger.debug(
            "on_get_missing_events: earliest_events: %r, latest_events: %r,"
            " limit: %d",
            earliest_events,
            latest_events,
            limit,
        )

        missing_events = await self.handler.on_get_missing_events(
            origin, room_id, earliest_events, latest_events, limit
        )

        # Only dump the full event list at small sizes, to keep logs readable.
        if len(missing_events) < 5:
            logger.debug(
                "Returning %d events: %r", len(missing_events), missing_events
            )
        else:
            logger.debug("Returning %d events", len(missing_events))

        time_now = self._clock.time_msec()

    return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
@log_function
async def on_openid_userinfo(self, token: str) -> Optional[str]:
    """Look up which user an OpenID access token belongs to.

    Returns the user ID, or None if the token is unknown or expired.
    """
    ts_now_ms = self._clock.time_msec()
    return await self.store.get_user_id_for_open_id_token(token, ts_now_ms)
def _transaction_from_pdus(self, pdu_list: List[EventBase]) -> Transaction:
    """Wrap the given PDUs in a fresh Transaction, ready for transmission."""
    now = self._clock.time_msec()
    return Transaction(
        origin=self.server_name,
        pdus=[event.get_pdu_json(now) for event in pdu_list],
        origin_server_ts=int(now),
        destination=None,
    )
async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None:
    """ Process a PDU received in a federation /send/ transaction.

    If the event is invalid, then this method throws a FederationError.
    (The error will then be logged and sent back to the sender (which
    probably won't do anything with it), and other events in the
    transaction will be processed as normal).

    It is likely that we'll then receive other events which refer to
    this rejected_event in their prev_events, etc.  When that happens,
    we'll attempt to fetch the rejected event again, which will presumably
    fail, so those second-generation events will also get rejected.

    Eventually, we get to the point where there are more than 10 events
    between any new events and the original rejected event. Since we
    only try to backfill 10 events deep on received pdu, we then accept the
    new event, possibly introducing a discontinuity in the DAG, with new
    forward extremities, so normal service is approximately returned,
    until we try to backfill across the discontinuity.

    Args:
        origin: server which sent the pdu
        pdu: received pdu

    Raises: FederationError if the signatures / hash do not match, or
        if the event was unacceptable for any other reason (eg, too large,
        too many prev_events, couldn't find the prev_events)
    """
    # check that it's actually being sent from a valid destination to
    # workaround bug #1753 in 0.18.5 and 0.18.6
    if origin != get_domain_from_id(pdu.sender):
        # We continue to accept join events from any server; this is
        # necessary for the federation join dance to work correctly.
        # (When we join over federation, the "helper" server is
        # responsible for sending out the join event, rather than the
        # origin. See bug #1893. This is also true for some third party
        # invites).
        if not (
            pdu.type == "m.room.member"
            and pdu.content
            and pdu.content.get("membership", None)
            in (Membership.JOIN, Membership.INVITE)
        ):
            logger.info(
                "Discarding PDU %s from invalid origin %s", pdu.event_id, origin
            )
            return
        else:
            logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)

    # We've already checked that we know the room version by this point
    room_version = await self.store.get_room_version(pdu.room_id)

    # Check signature.
    try:
        pdu = await self._check_sigs_and_hash(room_version, pdu)
    except SynapseError as e:
        raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)

    # NOTE(review): sent_to_us_directly appears to mark PDUs that arrived in
    # a /send transaction (as here), as opposed to ones we fetched ourselves
    # — confirm against the handler.
    await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
def __str__(self):
    """Short human-readable description, used in logging."""
    return f"<ReplicationLayer({self.server_name})>"
async def exchange_third_party_invite(
    self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict
):
    """Delegate a third-party-invite exchange to the federation handler."""
    return await self.handler.exchange_third_party_invite(
        sender_user_id, target_user_id, room_id, signed
    )
async def on_exchange_third_party_invite_request(
    self, room_id: str, event_dict: Dict
):
    """Delegate an incoming third-party-invite exchange to the handler."""
    return await self.handler.on_exchange_third_party_invite_request(
        room_id, event_dict
    )
async def check_server_matches_acl(self, server_name: str, room_id: str):
    """Check if the given server is allowed by the server ACLs in the room

    Args:
        server_name: name of server, *without any port part*
        room_id: ID of the room to check

    Raises:
        AuthError if the server does not match the ACL
    """
    state_ids = await self.store.get_current_state_ids(room_id)
    acl_event_id = state_ids.get((EventTypes.ServerACL, ""))

    # No ACL event in the room's current state: every server is allowed.
    if not acl_event_id:
        return

    acl_event = await self.store.get_event(acl_event_id)
    if server_matches_acl_event(server_name, acl_event):
        return

    raise AuthError(code=403, msg="Server is banned from room")
def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool:
    """Decide whether server_name is permitted by an m.room.server_acl event.

    Args:
        server_name: name of server, without any port part
        acl_event: m.room.server_acl event

    Returns:
        True if this server is allowed by the ACLs
    """
    content = acl_event.content
    logger.debug("Checking %s against acl %s", server_name, content)

    # first of all, check if literal IPs are blocked, and if so, whether the
    # server name is a literal IP
    allow_ip_literals = content.get("allow_ip_literals", True)
    if not isinstance(allow_ip_literals, bool):
        logger.warning("Ignoring non-bool allow_ip_literals flag")
        allow_ip_literals = True
    if not allow_ip_literals:
        # IPv6 literals start with '['; IPv4 detection is lifted from twisted.
        if server_name[0] == "[" or isIPAddress(server_name):
            return False

    # next, check the deny list: any match rejects immediately.
    deny = content.get("deny", [])
    if not isinstance(deny, (list, tuple)):
        logger.warning("Ignoring non-list deny ACL %s", deny)
        deny = []
    if any(_acl_entry_matches(server_name, entry) for entry in deny):
        return False

    # then the allow list: the server must appear here to be accepted;
    # everything else falls through and is rejected.
    allow = content.get("allow", [])
    if not isinstance(allow, (list, tuple)):
        logger.warning("Ignoring non-list allow ACL %s", allow)
        allow = []
    return any(_acl_entry_matches(server_name, entry) for entry in allow)
def _acl_entry_matches(server_name: str, acl_entry: Any) -> bool:
    """Return True if a single ACL glob entry matches the server name."""
    if not isinstance(acl_entry, str):
        logger.warning(
            "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
        )
        return False
    return bool(glob_to_regex(acl_entry).match(server_name))
class FederationHandlerRegistry:
    """Allows classes to register themselves as handlers for a given EDU or
    query type for incoming federation traffic.
    """

    def __init__(self, hs: "HomeServer"):
        self.config = hs.config
        self.http_client = hs.get_simple_http_client()
        self.clock = hs.get_clock()
        self._instance_name = hs.get_instance_name()

        # These are safe to load in monolith mode, but will explode if we try
        # and use them. However we have guards before we use them to ensure that
        # we don't route to ourselves, and in monolith mode that will always be
        # the case.
        self._get_query_client = ReplicationGetQueryRestServlet.make_client(hs)
        self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs)

        # Registered handlers, keyed by EDU type / query type respectively.
        self.edu_handlers = (
            {}
        )  # type: Dict[str, Callable[[str, dict], Awaitable[None]]]
        self.query_handlers = {}  # type: Dict[str, Callable[[dict], Awaitable[None]]]

        # Map from type to instance name that we should route EDU handling to.
        self._edu_type_to_instance = {}  # type: Dict[str, str]

    def register_edu_handler(
        self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation EDU of the given type.

        Args:
            edu_type: The type of the incoming EDU to register handler for
            handler: A callable invoked on incoming EDU
                of the given type. The arguments are the origin server name and
                the EDU contents.

        Raises:
            KeyError: if a handler is already registered for this EDU type.
        """
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))

        logger.info("Registering federation EDU handler for %r", edu_type)

        self.edu_handlers[edu_type] = handler

    def register_query_handler(
        self, query_type: str, handler: Callable[[dict], defer.Deferred]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.

        Args:
            query_type: Category name of the query, which should match
                the string used by make_query.
            handler: Invoked to handle
                incoming queries of this type. The return will be yielded
                on and the result used as the response to the query request.

        Raises:
            KeyError: if a handler is already registered for this query type.
        """
        if query_type in self.query_handlers:
            raise KeyError("Already have a Query handler for %s" % (query_type,))

        logger.info("Registering federation query handler for %r", query_type)

        self.query_handlers[query_type] = handler

    def register_instance_for_edu(self, edu_type: str, instance_name: str):
        """Register that the EDU handler is on a different instance than master.
        """
        self._edu_type_to_instance[edu_type] = instance_name

    async def on_edu(self, edu_type: str, origin: str, content: dict):
        """Dispatch an incoming EDU to its registered handler, or route it to
        the worker instance registered for that type. Handler errors are
        logged rather than propagated to the sender.
        """
        # Presence EDUs are dropped entirely when presence is disabled.
        if not self.config.use_presence and edu_type == "m.presence":
            return

        # Check if we have a handler on this instance
        handler = self.edu_handlers.get(edu_type)
        if handler:
            with start_active_span_from_edu(content, "handle_edu"):
                try:
                    await handler(origin, content)
                except SynapseError as e:
                    logger.info("Failed to handle edu %r: %r", edu_type, e)
                except Exception:
                    logger.exception("Failed to handle edu %r", edu_type)
            return

        # Check if we can route it somewhere else that isn't us
        route_to = self._edu_type_to_instance.get(edu_type, "master")
        if route_to != self._instance_name:
            try:
                await self._send_edu(
                    instance_name=route_to,
                    edu_type=edu_type,
                    origin=origin,
                    content=content,
                )
            except SynapseError as e:
                logger.info("Failed to handle edu %r: %r", edu_type, e)
            except Exception:
                logger.exception("Failed to handle edu %r", edu_type)
            return

        # Oh well, let's just log and move on.
        logger.warning("No handler registered for EDU type %s", edu_type)

    async def on_query(self, query_type: str, args: dict):
        """Dispatch an incoming federation query and return its response.

        Raises:
            NotFoundError: if no handler is registered for this query type.
        """
        handler = self.query_handlers.get(query_type)
        if handler:
            return await handler(args)

        # Check if we can route it somewhere else that isn't us
        if self._instance_name == "master":
            return await self._get_query_client(query_type=query_type, args=args)

        # Uh oh, no handler! Let's raise an exception so the request returns an
        # error.
        logger.warning("No handler registered for query type %s", query_type)
        raise NotFoundError("No handler for Query type '%s'" % (query_type,))
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_4373_1 |
crossvul-python_data_bad_1916_1 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib.parse
from io import BytesIO
from typing import (
TYPE_CHECKING,
Any,
BinaryIO,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
import treq
from canonicaljson import encode_canonical_json
from netaddr import IPAddress, IPSet
from prometheus_client import Counter
from zope.interface import implementer, provider
from OpenSSL import SSL
from OpenSSL.SSL import VERIFY_NONE
from twisted.internet import defer, error as twisted_error, protocol, ssl
from twisted.internet.interfaces import (
IAddress,
IHostResolution,
IReactorPluggableNameResolver,
IResolutionReceiver,
)
from twisted.internet.task import Cooperator
from twisted.python.failure import Failure
from twisted.web._newclient import ResponseDone
from twisted.web.client import (
Agent,
HTTPConnectionPool,
ResponseNeverReceived,
readBody,
)
from twisted.web.http import PotentialDataLoss
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IResponse
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_uri
from synapse.http.proxyagent import ProxyAgent
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred
if TYPE_CHECKING:
from synapse.app.homeserver import HomeServer
logger = logging.getLogger(__name__)
# Prometheus metrics for outbound HTTP requests and the responses they get.
outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"])
incoming_responses_counter = Counter(
    "synapse_http_client_responses", "", ["method", "code"]
)

# the type of the headers list, to be passed to the t.w.h.Headers.
# Actually we can mix str and bytes keys, but Mapping treats 'key' as invariant so
# we simplify.
RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]]

# the value actually has to be a List, but List is invariant so we can't specify that
# the entries can either be Lists or bytes.
RawHeaderValue = Sequence[Union[str, bytes]]

# the type of the query params, to be passed into `urlencode`
QueryParamValue = Union[str, bytes, Iterable[Union[str, bytes]]]
QueryParams = Union[Mapping[str, QueryParamValue], Mapping[bytes, QueryParamValue]]
def check_against_blacklist(
    ip_address: IPAddress, ip_whitelist: Optional[IPSet], ip_blacklist: IPSet
) -> bool:
    """
    Compares an IP address to allowed and disallowed IP sets.

    Args:
        ip_address: The IP address to check
        ip_whitelist: Allowed IP addresses.
        ip_blacklist: Disallowed IP addresses.

    Returns:
        True if the IP address is in the blacklist and not in the whitelist.
    """
    if ip_address not in ip_blacklist:
        return False
    # The whitelist punches holes in the blacklist.
    return ip_whitelist is None or ip_address not in ip_whitelist
_EPSILON = 0.00000001
def _make_scheduler(reactor):
"""Makes a schedular suitable for a Cooperator using the given reactor.
(This is effectively just a copy from `twisted.internet.task`)
"""
def _scheduler(x):
return reactor.callLater(_EPSILON, x)
return _scheduler
class _IPBlacklistingResolver:
    """
    A proxy for reactor.nameResolver which only produces non-blacklisted IP
    addresses, preventing DNS rebinding attacks on URL preview.
    """

    def __init__(
        self,
        reactor: IReactorPluggableNameResolver,
        ip_whitelist: Optional[IPSet],
        ip_blacklist: IPSet,
    ):
        """
        Args:
            reactor: The twisted reactor.
            ip_whitelist: IP addresses to allow.
            ip_blacklist: IP addresses to disallow.
        """
        self._reactor = reactor
        self._ip_whitelist = ip_whitelist
        self._ip_blacklist = ip_blacklist

    def resolveHostName(
        self, recv: IResolutionReceiver, hostname: str, portNumber: int = 0
    ) -> IResolutionReceiver:
        r = recv()
        # Addresses reported by the real resolver; checked in bulk once
        # resolution completes (see _callback below).
        addresses = []  # type: List[IAddress]

        def _callback() -> None:
            # Replay the resolution to the caller, but only if none of the
            # resolved addresses is blacklisted.
            r.resolutionBegan(None)

            has_bad_ip = False
            for i in addresses:
                ip_address = IPAddress(i.host)

                if check_against_blacklist(
                    ip_address, self._ip_whitelist, self._ip_blacklist
                ):
                    logger.info(
                        "Dropped %s from DNS resolution to %s due to blacklist"
                        % (ip_address, hostname)
                    )
                    has_bad_ip = True

            # if we have a blacklisted IP, we'd like to raise an error to block the
            # request, but all we can really do from here is claim that there were no
            # valid results.
            if not has_bad_ip:
                for i in addresses:
                    r.addressResolved(i)
            r.resolutionComplete()

        # Adapter that collects addresses from the real resolver and triggers
        # the blacklist check once resolution is complete.
        @provider(IResolutionReceiver)
        class EndpointReceiver:
            @staticmethod
            def resolutionBegan(resolutionInProgress: IHostResolution) -> None:
                pass

            @staticmethod
            def addressResolved(address: IAddress) -> None:
                addresses.append(address)

            @staticmethod
            def resolutionComplete() -> None:
                _callback()

        self._reactor.nameResolver.resolveHostName(
            EndpointReceiver, hostname, portNumber=portNumber
        )

        return r
@implementer(IReactorPluggableNameResolver)
class BlacklistingReactorWrapper:
    """
    A Reactor wrapper which will prevent DNS resolution to blacklisted IP
    addresses, to prevent DNS rebinding.
    """

    def __init__(
        self,
        reactor: IReactorPluggableNameResolver,
        ip_whitelist: Optional[IPSet],
        ip_blacklist: IPSet,
    ):
        self._reactor = reactor

        # Swap in a resolver that refuses to hand back blacklisted addresses,
        # to prevent DNS rebinding.
        self._nameResolver = _IPBlacklistingResolver(
            self._reactor, ip_whitelist, ip_blacklist
        )

    def __getattr__(self, attr: str) -> Any:
        # Intercept only the DNS resolver; everything else is delegated to
        # the wrapped reactor.
        if attr == "nameResolver":
            return self._nameResolver
        return getattr(self._reactor, attr)
class BlacklistingAgentWrapper(Agent):
    """
    An Agent wrapper which will prevent access to IP addresses being accessed
    directly (without an IP address lookup).
    """

    def __init__(
        self,
        agent: IAgent,
        ip_whitelist: Optional[IPSet] = None,
        ip_blacklist: Optional[IPSet] = None,
    ):
        """
        Args:
            agent: The Agent to wrap.
            ip_whitelist: IP addresses to allow.
            ip_blacklist: IP addresses to disallow.
        """
        self._agent = agent
        self._ip_whitelist = ip_whitelist
        self._ip_blacklist = ip_blacklist

    def request(
        self,
        method: bytes,
        uri: bytes,
        headers: Optional[Headers] = None,
        bodyProducer: Optional[IBodyProducer] = None,
    ) -> defer.Deferred:
        h = urllib.parse.urlparse(uri.decode("ascii"))

        try:
            # If the host component is a literal IP, check it against the
            # blacklist directly (no DNS lookup will happen for it).
            ip_address = IPAddress(h.hostname)

            if check_against_blacklist(
                ip_address, self._ip_whitelist, self._ip_blacklist
            ):
                logger.info("Blocking access to %s due to blacklist" % (ip_address,))
                e = SynapseError(403, "IP address blocked by IP blacklist entry")
                return defer.fail(Failure(e))
        except Exception:
            # Not an IP literal (hostname, or missing host): fall through and
            # rely on the DNS-level blacklisting resolver instead.
            pass

        return self._agent.request(
            method, uri, headers=headers, bodyProducer=bodyProducer
        )
class SimpleHttpClient:
    """
    A simple, no-frills HTTP client with methods that wrap up common ways of
    using HTTP in Matrix
    """

    def __init__(
        self,
        hs: "HomeServer",
        treq_args: Optional[Dict[str, Any]] = None,
        ip_whitelist: Optional[IPSet] = None,
        ip_blacklist: Optional[IPSet] = None,
        http_proxy: Optional[bytes] = None,
        https_proxy: Optional[bytes] = None,
    ):
        """
        Args:
            hs
            treq_args: Extra keyword arguments to be given to treq.request.
            ip_blacklist: The IP addresses that are blacklisted that
                we may not request.
            ip_whitelist: The whitelisted IP addresses, that we can
                request if it were otherwise caught in a blacklist.
            http_proxy: proxy server to use for http connections. host[:port]
            https_proxy: proxy server to use for https connections. host[:port]
        """
        self.hs = hs

        self._ip_whitelist = ip_whitelist
        self._ip_blacklist = ip_blacklist
        # Avoid a shared mutable default argument: default to a fresh dict.
        self._extra_treq_args = treq_args or {}

        self.user_agent = hs.version_string
        self.clock = hs.get_clock()
        if hs.config.user_agent_suffix:
            self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix)

        # We use this for our body producers to ensure that they use the correct
        # reactor.
        self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_reactor()))

        self.user_agent = self.user_agent.encode("ascii")

        if self._ip_blacklist:
            # If we have an IP blacklist, we need to use a DNS resolver which
            # filters out blacklisted IP addresses, to prevent DNS rebinding.
            self.reactor = BlacklistingReactorWrapper(
                hs.get_reactor(), self._ip_whitelist, self._ip_blacklist
            )
        else:
            self.reactor = hs.get_reactor()

        # the pusher makes lots of concurrent SSL connections to sygnal, and
        # tends to do so in batches, so we need to allow the pool to keep
        # lots of idle connections around.
        pool = HTTPConnectionPool(self.reactor)
        # XXX: The justification for using the cache factor here is that larger instances
        # will need both more cache and more connections.
        # Still, this should probably be a separate dial
        pool.maxPersistentPerHost = max((100 * hs.config.caches.global_factor, 5))
        pool.cachedConnectionTimeout = 2 * 60

        self.agent = ProxyAgent(
            self.reactor,
            connectTimeout=15,
            contextFactory=self.hs.get_http_client_context_factory(),
            pool=pool,
            http_proxy=http_proxy,
            https_proxy=https_proxy,
        )

        if self._ip_blacklist:
            # If we have an IP blacklist, we then install the blacklisting Agent
            # which prevents direct access to IP addresses, that are not caught
            # by the DNS resolution.
            self.agent = BlacklistingAgentWrapper(
                self.agent,
                ip_whitelist=self._ip_whitelist,
                ip_blacklist=self._ip_blacklist,
            )

    async def request(
        self,
        method: str,
        uri: str,
        data: Optional[bytes] = None,
        headers: Optional[Headers] = None,
    ) -> IResponse:
        """
        Args:
            method: HTTP method to use.
            uri: URI to query.
            data: Data to send in the request body, if applicable.
            headers: Request headers.

        Returns:
            Response object, once the headers have been read.

        Raises:
            RequestTimedOutError if the request times out before the headers are read
        """
        outgoing_requests_counter.labels(method).inc()

        # log request but strip `access_token` (AS requests for example include this)
        logger.debug("Sending request %s %s", method, redact_uri(uri))

        with start_active_span(
            "outgoing-client-request",
            tags={
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
                tags.HTTP_METHOD: method,
                tags.HTTP_URL: uri,
            },
            finish_on_close=True,
        ):
            try:
                body_producer = None
                if data is not None:
                    body_producer = QuieterFileBodyProducer(
                        BytesIO(data), cooperator=self._cooperator,
                    )

                request_deferred = treq.request(
                    method,
                    uri,
                    agent=self.agent,
                    data=body_producer,
                    headers=headers,
                    **self._extra_treq_args,
                )  # type: defer.Deferred

                # we use our own timeout mechanism rather than treq's as a workaround
                # for https://twistedmatrix.com/trac/ticket/9534.
                request_deferred = timeout_deferred(
                    request_deferred, 60, self.hs.get_reactor(),
                )

                # turn timeouts into RequestTimedOutErrors
                request_deferred.addErrback(_timeout_to_request_timed_out_error)

                response = await make_deferred_yieldable(request_deferred)

                incoming_responses_counter.labels(method, response.code).inc()
                logger.info(
                    "Received response to %s %s: %s",
                    method,
                    redact_uri(uri),
                    response.code,
                )
                return response
            except Exception as e:
                incoming_responses_counter.labels(method, "ERR").inc()
                logger.info(
                    "Error sending request to %s %s: %s %s",
                    method,
                    redact_uri(uri),
                    type(e).__name__,
                    e.args[0],
                )
                set_tag(tags.ERROR, True)
                set_tag("error_reason", e.args[0])
                raise

    async def post_urlencoded_get_json(
        self,
        uri: str,
        args: Optional[Mapping[str, Union[str, List[str]]]] = None,
        headers: Optional[RawHeaders] = None,
    ) -> Any:
        """
        Args:
            uri: uri to query
            args: parameters to be url-encoded in the body
            headers: a map from header name to a list of values for that header

        Returns:
            parsed json

        Raises:
            RequestTimedOutError: if there is a timeout before the response headers
               are received. Note there is currently no timeout on reading the response
               body.

            HttpResponseException: On a non-2xx HTTP response.

            ValueError: if the response was not JSON
        """
        # TODO: Do we ever want to log message contents?
        logger.debug("post_urlencoded_get_json args: %s", args)

        query_bytes = encode_query_args(args)

        actual_headers = {
            b"Content-Type": [b"application/x-www-form-urlencoded"],
            b"User-Agent": [self.user_agent],
            b"Accept": [b"application/json"],
        }
        if headers:
            actual_headers.update(headers)  # type: ignore

        response = await self.request(
            "POST", uri, headers=Headers(actual_headers), data=query_bytes
        )

        body = await make_deferred_yieldable(readBody(response))

        if 200 <= response.code < 300:
            return json_decoder.decode(body.decode("utf-8"))
        else:
            raise HttpResponseException(
                response.code, response.phrase.decode("ascii", errors="replace"), body
            )

    async def post_json_get_json(
        self, uri: str, post_json: Any, headers: Optional[RawHeaders] = None
    ) -> Any:
        """
        Args:
            uri: URI to query.
            post_json: request body, to be encoded as json
            headers: a map from header name to a list of values for that header

        Returns:
            parsed json

        Raises:
            RequestTimedOutError: if there is a timeout before the response headers
               are received. Note there is currently no timeout on reading the response
               body.

            HttpResponseException: On a non-2xx HTTP response.

            ValueError: if the response was not JSON
        """
        json_str = encode_canonical_json(post_json)

        logger.debug("HTTP POST %s -> %s", json_str, uri)

        actual_headers = {
            b"Content-Type": [b"application/json"],
            b"User-Agent": [self.user_agent],
            b"Accept": [b"application/json"],
        }
        if headers:
            actual_headers.update(headers)  # type: ignore

        response = await self.request(
            "POST", uri, headers=Headers(actual_headers), data=json_str
        )

        body = await make_deferred_yieldable(readBody(response))

        if 200 <= response.code < 300:
            return json_decoder.decode(body.decode("utf-8"))
        else:
            raise HttpResponseException(
                response.code, response.phrase.decode("ascii", errors="replace"), body
            )

    async def get_json(
        self,
        uri: str,
        args: Optional[QueryParams] = None,
        headers: Optional[RawHeaders] = None,
    ) -> Any:
        """Gets some json from the given URI.

        Args:
            uri: The URI to request, not including query parameters
            args: A dictionary used to create query string
            headers: a map from header name to a list of values for that header
        Returns:
            Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON.
        Raises:
            RequestTimedOutError: if there is a timeout before the response headers
               are received. Note there is currently no timeout on reading the response
               body.

            HttpResponseException On a non-2xx HTTP response.

            ValueError: if the response was not JSON
        """
        actual_headers = {b"Accept": [b"application/json"]}
        if headers:
            actual_headers.update(headers)  # type: ignore

        # Fix: pass the merged headers on. Previously the raw `headers`
        # argument was forwarded, silently dropping the Accept header
        # constructed above.
        body = await self.get_raw(uri, args, headers=actual_headers)
        return json_decoder.decode(body.decode("utf-8"))

    async def put_json(
        self,
        uri: str,
        json_body: Any,
        args: Optional[QueryParams] = None,
        headers: Optional[RawHeaders] = None,
    ) -> Any:
        """Puts some json to the given URI.

        Args:
            uri: The URI to request, not including query parameters
            json_body: The JSON to put in the HTTP body,
            args: A dictionary used to create query strings
            headers: a map from header name to a list of values for that header
        Returns:
            Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON.
        Raises:
            RequestTimedOutError: if there is a timeout before the response headers
               are received. Note there is currently no timeout on reading the response
               body.

            HttpResponseException On a non-2xx HTTP response.

            ValueError: if the response was not JSON
        """
        if args:
            query_str = urllib.parse.urlencode(args, True)
            uri = "%s?%s" % (uri, query_str)

        json_str = encode_canonical_json(json_body)

        actual_headers = {
            b"Content-Type": [b"application/json"],
            b"User-Agent": [self.user_agent],
            b"Accept": [b"application/json"],
        }
        if headers:
            actual_headers.update(headers)  # type: ignore

        response = await self.request(
            "PUT", uri, headers=Headers(actual_headers), data=json_str
        )

        body = await make_deferred_yieldable(readBody(response))

        if 200 <= response.code < 300:
            return json_decoder.decode(body.decode("utf-8"))
        else:
            raise HttpResponseException(
                response.code, response.phrase.decode("ascii", errors="replace"), body
            )

    async def get_raw(
        self,
        uri: str,
        args: Optional[QueryParams] = None,
        headers: Optional[RawHeaders] = None,
    ) -> bytes:
        """Gets raw text from the given URI.

        Args:
            uri: The URI to request, not including query parameters
            args: A dictionary used to create query strings
            headers: a map from header name to a list of values for that header
        Returns:
            Succeeds when we get a 2xx HTTP response, with the
            HTTP body as bytes.
        Raises:
            RequestTimedOutError: if there is a timeout before the response headers
               are received. Note there is currently no timeout on reading the response
               body.

            HttpResponseException on a non-2xx HTTP response.
        """
        if args:
            query_str = urllib.parse.urlencode(args, True)
            uri = "%s?%s" % (uri, query_str)

        actual_headers = {b"User-Agent": [self.user_agent]}
        if headers:
            actual_headers.update(headers)  # type: ignore

        response = await self.request("GET", uri, headers=Headers(actual_headers))

        body = await make_deferred_yieldable(readBody(response))

        if 200 <= response.code < 300:
            return body
        else:
            raise HttpResponseException(
                response.code, response.phrase.decode("ascii", errors="replace"), body
            )

    # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient.
    # The two should be factored out.

    async def get_file(
        self,
        url: str,
        output_stream: BinaryIO,
        max_size: Optional[int] = None,
        headers: Optional[RawHeaders] = None,
    ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]:
        """GETs a file from a given URL
        Args:
            url: The URL to GET
            output_stream: File to write the response body to.
            headers: A map from header name to a list of values for that header
        Returns:
            A tuple of the file length, dict of the response
            headers, absolute URI of the response and HTTP response code.

        Raises:
            RequestTimedOutError: if there is a timeout before the response headers
               are received. Note there is currently no timeout on reading the response
               body.

            SynapseError: if the response is not a 2xx, the remote file is too large, or
               another exception happens during the download.
        """
        actual_headers = {b"User-Agent": [self.user_agent]}
        if headers:
            actual_headers.update(headers)  # type: ignore

        response = await self.request("GET", url, headers=Headers(actual_headers))

        resp_headers = dict(response.headers.getAllRawHeaders())

        # Fail fast when the remote declares an over-size Content-Length, so
        # we never start reading a body we would refuse anyway.
        if (
            b"Content-Length" in resp_headers
            and max_size
            and int(resp_headers[b"Content-Length"][0]) > max_size
        ):
            logger.warning("Requested URL is too large > %r bytes" % (max_size,))
            raise SynapseError(
                502,
                "Requested file is too large > %r bytes" % (max_size,),
                Codes.TOO_LARGE,
            )

        if response.code > 299:
            logger.warning("Got %d when downloading %s" % (response.code, url))
            raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN)

        # TODO: if our Content-Type is HTML or something, just read the first
        # N bytes into RAM rather than saving it all to disk only to read it
        # straight back in again

        try:
            # NOTE(review): readBodyToFile is expected to be defined elsewhere
            # in this module (it is not among the top-of-file imports).
            length = await make_deferred_yieldable(
                readBodyToFile(response, output_stream, max_size)
            )
        except SynapseError:
            # This can happen e.g. because the body is too large.
            raise
        except Exception as e:
            raise SynapseError(502, ("Failed to download remote body: %s" % e)) from e

        return (
            length,
            resp_headers,
            response.request.absoluteURI.decode("ascii"),
            response.code,
        )
def _timeout_to_request_timed_out_error(f: Failure):
    """Errback helper: translate twisted timeout failures into a
    RequestTimedOutError, passing any other failure straight through.
    """
    # The TCP connection has its own timeout (set by the 'connectTimeout' param
    # on the Agent), which raises twisted_error.TimeoutError exception.
    connect_timeouts = (
        twisted_error.TimeoutError,
        twisted_error.ConnectingCancelledError,
    )
    # this one means that we hit our overall timeout on the request
    response_timeouts = (defer.TimeoutError, ResponseNeverReceived)

    if f.check(*connect_timeouts):
        raise RequestTimedOutError("Timeout connecting to remote server")
    if f.check(*response_timeouts):
        raise RequestTimedOutError("Timeout waiting for response from remote server")

    return f
class _ReadBodyToFileProtocol(protocol.Protocol):
    """Twisted Protocol which streams a response body into a file-like object,
    optionally enforcing a maximum number of bytes.

    The supplied deferred is called back with the number of bytes written, or
    erred back with a 502 SynapseError when `max_size` is exceeded (or with
    the underlying failure on connection errors).
    """

    def __init__(
        self, stream: BinaryIO, deferred: defer.Deferred, max_size: Optional[int]
    ):
        self.stream = stream
        self.deferred = deferred
        self.length = 0
        self.max_size = max_size
        # Set once the size limit has been exceeded, so that we stop writing
        # further data to the stream.
        self._failed = False

    def dataReceived(self, data: bytes) -> None:
        # loseConnection() below is asynchronous, so dataReceived may still be
        # invoked with further chunks after we have failed. Previously those
        # chunks were written to the stream anyway; discard them instead.
        if self._failed:
            return

        self.stream.write(data)
        self.length += len(data)
        if self.max_size is not None and self.length >= self.max_size:
            self._failed = True
            self.deferred.errback(
                SynapseError(
                    502,
                    "Requested file is too large > %r bytes" % (self.max_size,),
                    Codes.TOO_LARGE,
                )
            )
            # Swap in a fresh deferred so that connectionLost cannot fire the
            # (already-failed) one a second time.
            self.deferred = defer.Deferred()
            self.transport.loseConnection()

    def connectionLost(self, reason: Failure) -> None:
        if reason.check(ResponseDone):
            self.deferred.callback(self.length)
        elif reason.check(PotentialDataLoss):
            # stolen from https://github.com/twisted/treq/pull/49/files
            # http://twistedmatrix.com/trac/ticket/4840
            self.deferred.callback(self.length)
        else:
            self.deferred.errback(reason)
def readBodyToFile(
    response: IResponse, stream: BinaryIO, max_size: Optional[int]
) -> defer.Deferred:
    """
    Read a HTTP response body to a file-object. Optionally enforcing a maximum file size.

    Args:
        response: The HTTP response to read from.
        stream: The file-object to write to.
        max_size: The maximum file size to allow.

    Returns:
        A Deferred which resolves to the length of the read body.
    """
    d = defer.Deferred()
    # Twisted delivers the body incrementally to the protocol, which writes
    # each chunk to `stream` and fires `d` on completion (or overflow).
    response.deliverBody(_ReadBodyToFileProtocol(stream, d, max_size))
    return d
def encode_query_args(args: Optional[Mapping[str, Union[str, List[str]]]]) -> bytes:
    """
    Encode a map of query arguments as bytes suitable for appending to a URL.

    Args:
        args: The query arguments: a mapping of string to string or list of
            strings, or None for no arguments.

    Returns:
        The query arguments encoded as bytes (empty when args is None).
    """
    if args is None:
        return b""

    encoded = {
        key: [v.encode("utf8") for v in ([values] if isinstance(values, str) else values)]
        for key, values in args.items()
    }
    return urllib.parse.urlencode(encoded, True).encode("utf8")
class InsecureInterceptableContextFactory(ssl.ContextFactory):
    """
    Factory for PyOpenSSL SSL contexts which accepts any certificate for any domain.

    Do not use this since it allows an attacker to intercept your communications.
    """

    def __init__(self):
        # VERIFY_NONE plus an always-None callback: certificate verification
        # is disabled entirely. Only suitable for deliberate debugging use.
        self._context = SSL.Context(SSL.SSLv23_METHOD)
        self._context.set_verify(VERIFY_NONE, lambda *_: None)

    def getContext(self, hostname=None, port=None):
        # Same (insecure) context is handed out for every host/port.
        return self._context

    def creatorForNetloc(self, hostname, port):
        # Twisted Agent policy API: act as our own connection creator for
        # every netloc.
        return self
| ./CrossVul/dataset_final_sorted/CWE-400/py/bad_1916_1 |
crossvul-python_data_good_4602_1 | from __future__ import absolute_import
import re
from collections import namedtuple
from ..exceptions import LocationParseError
from ..packages import six, rfc3986
from ..packages.rfc3986.exceptions import RFC3986Exception, ValidationError
from ..packages.rfc3986.validators import Validator
from ..packages.rfc3986 import abnf_regexp, normalizers, compat, misc
# Field names for the Url namedtuple, in the order they appear in a URL.
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']

# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ('http', 'https', None)

# Regex for detecting URLs with schemes. RFC 3986 Section 3.1
SCHEME_REGEX = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+\-]*:|/)")

# Characters which need no percent-encoding in a path component (RFC 3986
# "pchar" plus "/"); query and fragment additionally allow "?".
PATH_CHARS = abnf_regexp.UNRESERVED_CHARS_SET | abnf_regexp.SUB_DELIMITERS_SET | {':', '@', '/'}
QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {'?'}
class Url(namedtuple('Url', url_attrs)):
    """
    Data structure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`. Both the scheme and host are normalized as they are
    both case-insensitive according to RFC 3986.
    """
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Normalize at construction time: a non-empty path always starts with
        # '/', and the scheme is lower-cased (schemes are case-insensitive).
        if path and not path.startswith('/'):
            path = '/' + path
        if scheme is not None:
            scheme = scheme.lower()
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri

    @property
    def netloc(self):
        """Network location including host and port"""
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ...     '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        url = u''

        # We use "is not None" we want things to happen with empty strings (or 0 port)
        if scheme is not None:
            url += scheme + u'://'
        if auth is not None:
            url += auth + u'@'
        if host is not None:
            url += host
        if port is not None:
            url += u':' + str(port)
        if path is not None:
            url += path
        if query is not None:
            url += u'?' + query
        if fragment is not None:
            url += u'#' + fragment

        return url

    def __str__(self):
        return self.url
def split_first(s, delims):
    """
    .. deprecated:: 1.25

    Split *s* at the first occurrence of any delimiter in *delims*.

    Returns a ``(head, tail, delimiter)`` triple; when no delimiter is
    found, ``head`` is the whole input and tail/delimiter are ``''``/``None``.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    # (index, delimiter) of the earliest match seen so far.
    best = None
    for delim in delims:
        found = s.find(delim)
        if found >= 0 and (best is None or found < best[0]):
            best = (found, delim)

    if best is None:
        return s, '', None

    idx, delim = best
    return s[:idx], s[idx + 1:], delim
def _encode_invalid_chars(component, allowed_chars, encoding='utf-8'):
    """Percent-encodes a URI component without reapplying
    onto an already percent-encoded component. Based on
    rfc3986.normalizers.encode_component()

    :param component: URI component (path/query/fragment), or None.
    :param allowed_chars: set of ASCII characters that need no encoding.
    :param encoding: encoding used to decode the final result.
    """
    if component is None:
        return component

    # Try to see if the component we're encoding is already percent-encoded
    # so we can skip all '%' characters but still encode all others.
    percent_encodings = len(normalizers.PERCENT_MATCHER.findall(
        compat.to_str(component, encoding)))

    # 'surrogatepass' keeps lone surrogates (from misdecoded input) intact.
    uri_bytes = component.encode('utf-8', 'surrogatepass')
    # Every '%' accounted for by a full %XX escape => treat as pre-encoded.
    is_percent_encoded = percent_encodings == uri_bytes.count(b'%')

    encoded_component = bytearray()

    for i in range(0, len(uri_bytes)):
        # Will return a single character bytestring on both Python 2 & 3
        byte = uri_bytes[i:i+1]
        byte_ord = ord(byte)
        # Pass through: '%' of an existing escape, or an allowed ASCII char.
        if ((is_percent_encoded and byte == b'%')
                or (byte_ord < 128 and byte.decode() in allowed_chars)):
            encoded_component.extend(byte)
            continue
        # Otherwise emit an upper-case %XX escape for this byte.
        encoded_component.extend('%{0:02x}'.format(byte_ord).encode().upper())

    return encoded_component.decode(encoding)
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.
    This parser is RFC 3986 compliant.

    :param str url: URL to parse into a :class:`.Url` namedtuple.

    Partly backwards-compatible with :mod:`urlparse`.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """
    if not url:
        # Empty
        return Url()

    # Remember whether the caller passed bytes, so the returned components
    # can be converted back to the input type at the end.
    is_string = not isinstance(url, six.binary_type)

    # RFC 3986 doesn't like URLs that have a host but don't start
    # with a scheme and we support URLs like that so we need to
    # detect that problem and add an empty scheme indication.
    # We don't get hurt on path-only URLs here as it's stripped
    # off and given an empty scheme anyways.
    if not SCHEME_REGEX.search(url):
        url = "//" + url

    def idna_encode(name):
        # IDNA-encode any non-ASCII hostname; requires the optional 'idna'
        # package, so import lazily and fail with a parse error if absent.
        if name and any([ord(x) > 128 for x in name]):
            try:
                import idna
            except ImportError:
                raise LocationParseError("Unable to parse URL without the 'idna' module")
            try:
                return idna.encode(name.lower(), strict=True, std3_rules=True)
            except idna.IDNAError:
                raise LocationParseError(u"Name '%s' is not a valid IDNA label" % name)
        return name

    try:
        # Split the IRI, percent-encode any invalid characters per component,
        # then encode the whole reference down to a plain URI.
        split_iri = misc.IRI_MATCHER.match(compat.to_str(url)).groupdict()
        iri_ref = rfc3986.IRIReference(
            split_iri['scheme'], split_iri['authority'],
            _encode_invalid_chars(split_iri['path'], PATH_CHARS),
            _encode_invalid_chars(split_iri['query'], QUERY_CHARS),
            _encode_invalid_chars(split_iri['fragment'], FRAGMENT_CHARS)
        )
        has_authority = iri_ref.authority is not None
        uri_ref = iri_ref.encode(idna_encoder=idna_encode)
    except (ValueError, RFC3986Exception):
        return six.raise_from(LocationParseError(url), None)

    # rfc3986 strips the authority if it's invalid
    if has_authority and uri_ref.authority is None:
        raise LocationParseError(url)

    # Only normalize schemes we understand to not break http+unix
    # or other schemes that don't follow RFC 3986.
    if uri_ref.scheme is None or uri_ref.scheme.lower() in NORMALIZABLE_SCHEMES:
        uri_ref = uri_ref.normalize()

    # Validate all URIReference components and ensure that all
    # components that were set before are still set after
    # normalization has completed.
    validator = Validator()
    try:
        validator.check_validity_of(
            *validator.COMPONENT_NAMES
        ).validate(uri_ref)
    except ValidationError:
        return six.raise_from(LocationParseError(url), None)

    # For the sake of backwards compatibility we put empty
    # string values for path if there are any defined values
    # beyond the path in the URL.
    # TODO: Remove this when we break backwards compatibility.
    path = uri_ref.path
    if not path:
        if (uri_ref.query is not None
                or uri_ref.fragment is not None):
            path = ""
        else:
            path = None

    # Ensure that each part of the URL is a `str` for
    # backwards compatibility.
    def to_input_type(x):
        if x is None:
            return None
        elif not is_string and not isinstance(x, six.binary_type):
            return x.encode('utf-8')
        return x

    return Url(
        scheme=to_input_type(uri_ref.scheme),
        auth=to_input_type(uri_ref.userinfo),
        host=to_input_type(uri_ref.host),
        port=int(uri_ref.port) if uri_ref.port is not None else None,
        path=to_input_type(path),
        query=to_input_type(uri_ref.query),
        fragment=to_input_type(uri_ref.fragment)
    )
def get_host(url):
    """
    Deprecated. Use :func:`parse_url` instead.

    Returns a ``(scheme, host, port)`` triple, defaulting the scheme to
    ``'http'`` when the parsed URL does not carry one.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme if parsed.scheme else 'http'
    return scheme, parsed.hostname, parsed.port
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_4602_1 |
crossvul-python_data_good_1916_2 | # -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import time
from io import BytesIO
from typing import Callable, Dict, Optional, Tuple
import attr
from twisted.internet import defer
from twisted.internet.interfaces import IReactorTime
from twisted.web.client import RedirectAgent
from twisted.web.http import stringToDatetime
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IResponse
from synapse.http.client import BodyExceededMaxSize, read_body_with_max_size
from synapse.logging.context import make_deferred_yieldable
from synapse.util import Clock, json_decoder
from synapse.util.caches.ttlcache import TTLCache
from synapse.util.metrics import Measure
# period to cache .well-known results for by default
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600

# jitter factor to add to the .well-known default cache ttls
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 0.1

# period to cache failure to fetch .well-known for
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600

# period to cache failure to fetch .well-known if there has recently been a
# valid well-known for that domain.
WELL_KNOWN_DOWN_CACHE_PERIOD = 2 * 60

# period to remember there was a valid well-known after valid record expires
WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID = 2 * 3600

# cap for .well-known cache period
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600

# lower bound for .well-known cache period
WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60

# The maximum size (in bytes) to allow a well-known file to be.
WELL_KNOWN_MAX_SIZE = 50 * 1024  # 50 KiB

# Attempt to refetch a cached well-known N% of the TTL before it expires.
# e.g. if set to 0.2 and we have a cached entry with a TTL of 5mins, then
# we'll start trying to refetch 1 minute before it expires.
WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2

# Number of times we retry fetching a well-known for a domain we know recently
# had a valid entry.
WELL_KNOWN_RETRY_ATTEMPTS = 3

logger = logging.getLogger(__name__)

# Process-wide caches, shared by default between WellKnownResolver instances.
_well_known_cache = TTLCache("well-known")
_had_valid_well_known_cache = TTLCache("had-valid-well-known")
@attr.s(slots=True, frozen=True)
class WellKnownLookupResult:
    """The outcome of a .well-known lookup."""

    # the server delegated to, or None when no (valid) delegation was found
    delegated_server = attr.ib()
class WellKnownResolver:
    """Handles well-known lookups for matrix servers.
    """

    def __init__(
        self,
        reactor: IReactorTime,
        agent: IAgent,
        user_agent: bytes,
        well_known_cache: Optional[TTLCache] = None,
        had_well_known_cache: Optional[TTLCache] = None,
    ):
        self._reactor = reactor
        self._clock = Clock(reactor)

        # Fall back to the process-wide caches so that resolvers share
        # lookup results by default.
        if well_known_cache is None:
            well_known_cache = _well_known_cache

        if had_well_known_cache is None:
            had_well_known_cache = _had_valid_well_known_cache

        self._well_known_cache = well_known_cache
        self._had_valid_well_known_cache = had_well_known_cache
        # Follow redirects when fetching the .well-known file.
        self._well_known_agent = RedirectAgent(agent)
        self.user_agent = user_agent

    async def get_well_known(self, server_name: bytes) -> WellKnownLookupResult:
        """Attempt to fetch and parse a .well-known file for the given server

        Args:
            server_name: name of the server, from the requested url

        Returns:
            The result of the lookup
        """
        try:
            prev_result, expiry, ttl = self._well_known_cache.get_with_expiry(
                server_name
            )

            # Serve from the cache unless we are within the grace period
            # before expiry, in which case we try a refetch.
            now = self._clock.time()
            if now < expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl:
                return WellKnownLookupResult(delegated_server=prev_result)
        except KeyError:
            prev_result = None

        # TODO: should we linearise so that we don't end up doing two .well-known
        # requests for the same server in parallel?
        try:
            with Measure(self._clock, "get_well_known"):
                result, cache_period = await self._fetch_well_known(
                    server_name
                )  # type: Optional[bytes], float
        except _FetchWellKnownFailure as e:
            if prev_result and e.temporary:
                # This is a temporary failure and we have a still valid cached
                # result, so lets return that. Hopefully the next time we ask
                # the remote will be back up again.
                return WellKnownLookupResult(delegated_server=prev_result)

            result = None

            if self._had_valid_well_known_cache.get(server_name, False):
                # We have recently seen a valid well-known record for this
                # server, so we cache the lack of well-known for a shorter time.
                cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD
            else:
                cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD

            # add some randomness to the TTL to avoid a stampeding herd
            cache_period *= random.uniform(
                1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
                1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
            )

        if cache_period > 0:
            self._well_known_cache.set(server_name, result, cache_period)

        return WellKnownLookupResult(delegated_server=result)

    async def _fetch_well_known(self, server_name: bytes) -> Tuple[bytes, float]:
        """Actually fetch and parse a .well-known, without checking the cache

        Args:
            server_name: name of the server, from the requested url

        Raises:
            _FetchWellKnownFailure if we fail to lookup a result

        Returns:
            The lookup result and cache period.
        """

        had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False)

        # We do this in two steps to differentiate between possibly transient
        # errors (e.g. can't connect to host, 503 response) and more permanent
        # errors (such as getting a 404 response).
        response, body = await self._make_well_known_request(
            server_name, retry=had_valid_well_known
        )

        try:
            if response.code != 200:
                raise Exception("Non-200 response %s" % (response.code,))

            parsed_body = json_decoder.decode(body.decode("utf-8"))
            logger.info("Response from .well-known: %s", parsed_body)

            result = parsed_body["m.server"].encode("ascii")
        except defer.CancelledError:
            # Bail if we've been cancelled
            raise
        except Exception as e:
            logger.info("Error parsing well-known for %s: %s", server_name, e)
            raise _FetchWellKnownFailure(temporary=False)

        cache_period = _cache_period_from_headers(
            response.headers, time_now=self._reactor.seconds
        )
        if cache_period is None:
            cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD

            # add some randomness to the TTL to avoid a stampeding herd every 24 hours
            # after startup
            cache_period *= random.uniform(
                1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
                1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
            )
        else:
            cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
            cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)

        # We got a success, mark as such in the cache
        self._had_valid_well_known_cache.set(
            server_name,
            bool(result),
            cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID,
        )

        return result, cache_period

    async def _make_well_known_request(
        self, server_name: bytes, retry: bool
    ) -> Tuple[IResponse, bytes]:
        """Make the well known request.

        This will retry the request if requested and it fails (with unable
        to connect or receives a 5xx error).

        Args:
            server_name: name of the server, from the requested url
            retry: Whether to retry the request if it fails.

        Raises:
            _FetchWellKnownFailure if we fail to lookup a result

        Returns:
            Returns the response object and body. Response may be a non-200 response.
        """
        uri = b"https://%s/.well-known/matrix/server" % (server_name,)
        uri_str = uri.decode("ascii")

        headers = {
            b"User-Agent": [self.user_agent],
        }

        i = 0
        while True:
            i += 1

            logger.info("Fetching %s", uri_str)

            try:
                response = await make_deferred_yieldable(
                    self._well_known_agent.request(
                        b"GET", uri, headers=Headers(headers)
                    )
                )
                # Read the body, enforcing the size cap so a hostile server
                # cannot feed us an unbounded response.
                body_stream = BytesIO()
                await make_deferred_yieldable(
                    read_body_with_max_size(response, body_stream, WELL_KNOWN_MAX_SIZE)
                )
                body = body_stream.getvalue()

                if 500 <= response.code < 600:
                    raise Exception("Non-200 response %s" % (response.code,))

                return response, body
            except defer.CancelledError:
                # Bail if we've been cancelled
                raise
            except BodyExceededMaxSize:
                # If the well-known file was too large, do not keep attempting
                # to download it, but consider it a temporary error.
                logger.warning(
                    "Requested .well-known file for %s is too large > %r bytes",
                    server_name.decode("ascii"),
                    WELL_KNOWN_MAX_SIZE,
                )
                raise _FetchWellKnownFailure(temporary=True)
            except Exception as e:
                if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS:
                    logger.info("Error fetching %s: %s", uri_str, e)
                    raise _FetchWellKnownFailure(temporary=True)

                logger.info("Error fetching %s: %s. Retrying", uri_str, e)

            # Sleep briefly in the hopes that they come back up
            await self._clock.sleep(0.5)
def _cache_period_from_headers(
    headers: Headers, time_now: Callable[[], float] = time.time
) -> Optional[float]:
    """Work out a cache lifetime in seconds from HTTP caching headers.

    Precedence: Cache-Control ``no-store`` (0), then ``max-age``, then
    ``Expires``. Returns None when the headers say nothing about caching.
    """
    directives = _parse_cache_control(headers)

    if b"no-store" in directives:
        return 0

    max_age = directives.get(b"max-age")
    if max_age:
        try:
            return int(max_age)
        except ValueError:
            pass

    expires_values = headers.getRawHeaders(b"expires")
    if expires_values is not None:
        try:
            return stringToDatetime(expires_values[-1]) - time_now()
        except ValueError:
            # RFC7234 says 'A cache recipient MUST interpret invalid date formats,
            # especially the value "0", as representing a time in the past (i.e.,
            # "already expired").
            return 0

    return None
def _parse_cache_control(headers: Headers) -> Dict[bytes, Optional[bytes]]:
    """Parse Cache-Control headers into a {directive: value} dict.

    Directive names are lower-cased; a directive without '=' maps to None.
    """
    directives = {}  # type: Dict[bytes, Optional[bytes]]
    for header_value in headers.getRawHeaders(b"cache-control", []):
        for raw_directive in header_value.split(b","):
            parts = raw_directive.split(b"=", 1)
            name = parts[0].strip().lower()
            directives[name] = parts[1].strip() if len(parts) > 1 else None
    return directives
@attr.s(slots=True)
class _FetchWellKnownFailure(Exception):
    """Raised internally when fetching or parsing a .well-known file fails."""

    # True if we didn't get a non-5xx HTTP response, i.e. this may or may not be
    # a temporary failure.
    temporary = attr.ib()
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_1916_2 |
crossvul-python_data_good_1090_0 | # Zulip's main markdown implementation. See docs/subsystems/markdown.md for
# detailed documentation on our markdown syntax.
from typing import (Any, Callable, Dict, Iterable, List, NamedTuple,
Optional, Set, Tuple, TypeVar, Union, cast)
from mypy_extensions import TypedDict
from typing.re import Match, Pattern
import markdown
import logging
import traceback
import urllib
import re
import os
import html
import time
import functools
import ujson
import xml.etree.cElementTree as etree
from xml.etree.cElementTree import Element
from collections import deque, defaultdict
import requests
from django.conf import settings
from django.db.models import Q
from markdown.extensions import codehilite, nl2br, tables
from zerver.lib.bugdown import fenced_code
from zerver.lib.bugdown.fenced_code import FENCE_RE
from zerver.lib.camo import get_camo_url
from zerver.lib.emoji import translate_emoticons, emoticon_regex
from zerver.lib.mention import possible_mentions, \
possible_user_group_mentions, extract_user_group
from zerver.lib.url_encoding import encode_stream
from zerver.lib.thumbnail import user_uploads_or_external
from zerver.lib.timeout import timeout, TimeoutExpired
from zerver.lib.cache import cache_with_key, NotFoundInCache
from zerver.lib.url_preview import preview as link_preview
from zerver.models import (
all_realm_filters,
get_active_streams,
MAX_MESSAGE_LENGTH,
Message,
Realm,
realm_filters_for_realm,
UserProfile,
UserGroup,
UserGroupMembership,
)
import zerver.lib.mention as mention
from zerver.lib.tex import render_tex
from zerver.lib.exceptions import BugdownRenderingException
ReturnT = TypeVar('ReturnT')

def one_time(method: Callable[[], ReturnT]) -> Callable[[], ReturnT]:
    '''
    Use this decorator with extreme caution.
    The function you wrap should have no dependency
    on any arguments (no args, no kwargs) nor should
    it depend on any global state.

    The wrapped function is invoked at most once; its result (including
    None) is cached and returned on every subsequent call.
    '''
    # Use a unique sentinel rather than None so that a legitimate None result
    # is also cached (previously a None-returning function was re-invoked on
    # every call).
    unset = object()
    val = unset  # type: object

    @functools.wraps(method)
    def cache_wrapper() -> ReturnT:
        nonlocal val
        if val is unset:
            val = method()
        return cast(ReturnT, val)

    return cache_wrapper
# Minimal user record used when rendering mentions/avatars.
FullNameInfo = TypedDict('FullNameInfo', {
    'id': int,
    'email': str,
    'full_name': str,
})

# Per-message rendering context (realm data, mention info, etc.) passed
# through the markdown pipeline.
DbData = Dict[str, Any]

# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1

_T = TypeVar('_T')
ElementStringNone = Union[Element, Optional[str]]

# Explicit avatar/gravatar image syntax, e.g. "!avatar(foo@example.com)".
AVATAR_REGEX = r'!avatar\((?P<email>[^)]*)\)'
GRAVATAR_REGEX = r'!gravatar\((?P<email>[^)]*)\)'
# Emoji syntax like ":smile:"; names may contain word chars, '-' and '+'.
EMOJI_REGEX = r'(?P<syntax>:[\w\-\+]+:)'
def verbose_compile(pattern: str) -> Any:
    """Compile *pattern* (written in re.VERBOSE style) so it must match the
    whole string, capturing the text before and after the match."""
    wrapped = "^(.*?)%s(.*?)$" % pattern
    return re.compile(wrapped, re.DOTALL | re.UNICODE | re.VERBOSE)
def normal_compile(pattern: str) -> Any:
    """Compile *pattern* anchored to the whole string, capturing the text
    before (non-greedy) and after (greedy) the match."""
    wrapped = r"^(.*?)%s(.*)$" % pattern
    return re.compile(wrapped, re.DOTALL | re.UNICODE)
# Matches stream links of the form "#**stream name**". Used with
# verbose_compile, so whitespace/comments inside the pattern are ignored.
STREAM_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*]+) # stream name can contain anything
\*\* # ends by double asterisks
"""

@one_time
def get_compiled_stream_link_regex() -> Pattern:
    # Compiled lazily and exactly once per process (via @one_time); the
    # pattern never changes at runtime.
    return verbose_compile(STREAM_LINK_REGEX)
# Lazily-built module-level cache for the (expensive) web link regex.
LINK_REGEX = None  # type: Pattern

def get_web_link_regex() -> Pattern:
    # We create this one time, but not at startup. So the
    # first message rendered in any process will have some
    # extra costs. It's roughly 75ms to run this code, so
    # caching the value in LINK_REGEX is super important here.
    global LINK_REGEX
    if LINK_REGEX is not None:
        return LINK_REGEX

    tlds = '|'.join(list_of_tlds())

    # A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
    #
    # We detect a url either by the `https?://` or by building around the TLD.

    # In lieu of having a recursive regex (which python doesn't support) to match
    # arbitrary numbers of nested matching parenthesis, we manually build a regexp that
    # can match up to six
    # The inner_paren_contents chunk matches the innermore non-parenthesis-holding text,
    # and the paren_group matches text with, optionally, a matching set of parens
    inner_paren_contents = r"[^\s()\"]*"
    paren_group = r"""
[^\s()\"]*? # Containing characters that won't end the URL
(?: \( %s \) # and more characters in matched parens
[^\s()\"]*? # followed by more characters
)* # zero-or-more sets of paired parens
"""
    # Nest the paren group six levels deep, then close the innermost level
    # with plain non-paren text.
    nested_paren_chunk = paren_group
    for i in range(6):
        nested_paren_chunk = nested_paren_chunk % (paren_group,)
    nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)

    file_links = r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r""
    REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
# (Double-negative lookbehind to allow start-of-string)
(?P<url> # Main group
(?:(?: # Domain part
https?://[\w.:@-]+? # If it has a protocol, anything goes.
|(?: # Or, if not, be more strict to avoid false-positives
(?:[\w-]+\.)+ # One or more domain components, separated by dots
(?:%s) # TLDs (filled in via format from tlds-alpha-by-domain.txt)
)
)
(?:/ # A path, beginning with /
%s # zero-to-6 sets of paired parens
)?) # Path is optional
| (?:[\w.-]+\@[\w.-]+\.[\w]+) # Email is separate, since it can't have a path
%s # File path start with file:///, enable by setting ENABLE_FILE_LINKS=True
| (?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{25,34}) # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html
)
(?= # URL must be followed by (not included in group)
[!:;\?\),\.\'\"\>]* # Optional punctuation characters
(?:\Z|\s) # followed by whitespace or end of string
)
""" % (tlds, nested_paren_chunk, file_links)
    LINK_REGEX = verbose_compile(REGEX)

    return LINK_REGEX
def clear_state_for_testing() -> None:
    # The link regex never changes in production, but our tests
    # try out both sides of ENABLE_FILE_LINKS, so we need
    # a way to clear it.
    global LINK_REGEX
    LINK_REGEX = None

# NOTE(review): logging.getLogger() with no name returns the *root* logger;
# logging.getLogger(__name__) would be the conventional choice -- confirm
# whether root-logger use is intentional before changing.
bugdown_logger = logging.getLogger()
def rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:
    """ If the link points to a local destination we can just switch to that
    instead of opening a new tab. """

    if not db_data:
        return link

    prefix = db_data['realm_uri'] + "/"
    if not link.startswith(prefix):
        return link

    # Drop the realm prefix; the +1 from the trailing "/" skips the `/`
    # before the hash link.
    return link[len(prefix):]
def url_embed_preview_enabled(message: Optional[Message]=None,
                              realm: Optional[Realm]=None,
                              no_previews: Optional[bool]=False) -> bool:
    """Decide whether URL embed previews should be rendered, combining the
    server setting, the caller's override and the realm preference."""
    if not settings.INLINE_URL_EMBED_PREVIEW or no_previews:
        return False

    if realm is None and message is not None:
        realm = message.get_realm()

    if realm is None:
        # realm can be None for odd use cases
        # like generating documentation or running
        # test code
        return True

    return realm.inline_url_embed_preview
def image_preview_enabled(message: Optional[Message]=None,
                          realm: Optional[Realm]=None,
                          no_previews: Optional[bool]=False) -> bool:
    """Decide whether inline image previews should be rendered, combining the
    server setting, the caller's override and the realm preference."""
    if not settings.INLINE_IMAGE_PREVIEW or no_previews:
        return False

    if realm is None and message is not None:
        realm = message.get_realm()

    if realm is None:
        # realm can be None for odd use cases
        # like generating documentation or running
        # test code
        return True

    return realm.inline_image_preview
def list_of_tlds() -> List[str]:
    """Return all known TLDs (longest first), for building the link regex.

    The previous implementation opened the data file without ever closing
    it; use a `with` block so the handle is released promptly.
    """
    # HACK we manually blacklist a few domains
    blacklist = ['PY\n', "MD\n"]

    # tlds-alpha-by-domain.txt comes from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
    tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
    with open(tlds_file, 'r') as f:
        # Note: the blacklist entries deliberately include the trailing
        # newline, since `tld` here is the raw line from the file.
        tlds = [tld.lower().strip() for tld in f
                if tld not in blacklist and not tld.startswith('#')]

    # Longest first, so that regex alternation prefers the longest TLD match.
    tlds.sort(key=len, reverse=True)
    return tlds
def walk_tree(root: Element,
              processor: Callable[[Element], Optional[_T]],
              stop_after_first: bool=False) -> List[_T]:
    """Breadth-first walk over the descendants of `root` (the root itself is
    not processed), collecting every non-None value returned by `processor`.

    If stop_after_first is True, return as soon as the first result is found.
    """
    results = []
    queue = deque([root])

    while queue:
        currElement = queue.popleft()
        # Element.getchildren() was deprecated and then removed (Python 3.9);
        # iterating the element directly is the supported equivalent, and
        # len() > 0 is the "has children" test.
        for child in currElement:
            if len(child):
                queue.append(child)

            result = processor(child)
            if result is not None:
                results.append(result)
                if stop_after_first:
                    return results

    return results
# The (grandparent, parent, child) context in which a processor match was
# found during a tree walk.
ElementFamily = NamedTuple('ElementFamily', [
    ('grandparent', Optional[Element]),
    ('parent', Element),
    ('child', Element)
])

# A processor result together with the family of the element that produced it.
ResultWithFamily = NamedTuple('ResultWithFamily', [
    ('family', ElementFamily),
    ('result', Any)
])

# A tree node paired with a link to its parent pair, used to recover
# grandparents during traversal.
# NOTE(review): in walk_tree_with_family the 'parent' field actually holds an
# ElementPair (or None), not an Element -- the declared type looks wrong;
# confirm before relying on it.
ElementPair = NamedTuple('ElementPair', [
    ('parent', Optional[Element]),
    ('value', Element)
])
def walk_tree_with_family(root: Element,
                          processor: Callable[[Element], Optional[_T]]
                          ) -> List[ResultWithFamily]:
    """Like walk_tree, but pairs each non-None `processor` result with
    the (grandparent, parent, child) family of the element it came from.

    The root element itself is not passed to `processor`.
    """
    results = []
    queue = deque([ElementPair(parent=None, value=root)])
    while queue:
        currElementPair = queue.popleft()
        # Iterating an Element yields its children; this replaces the
        # deprecated Element.getchildren(), which was removed in
        # Python 3.9.
        for child in currElementPair.value:
            if len(child):
                queue.append(ElementPair(parent=currElementPair, value=child))
            result = processor(child)
            if result is not None:
                if currElementPair.parent is not None:
                    grandparent_element = cast(ElementPair, currElementPair.parent)
                    grandparent = grandparent_element.value
                else:
                    grandparent = None
                family = ElementFamily(
                    grandparent=grandparent,
                    parent=currElementPair.value,
                    child=child
                )
                results.append(ResultWithFamily(
                    family=family,
                    result=result
                ))
    return results
# Note: any image height information is not actually used by add_a below.
def add_a(
        root: Element,
        url: str,
        link: str,
        title: Optional[str]=None,
        desc: Optional[str]=None,
        class_attr: str="message_inline_image",
        data_id: Optional[str]=None,
        insertion_index: Optional[int]=None,
        already_thumbnailed: Optional[bool]=False
) -> None:
    """Append (or insert at `insertion_index`) a <div><a><img/></a></div>
    inline-image preview for `link` under `root`."""
    if title is None:
        title = url_filename(link)
    if not title:
        title = ""
    if desc is None:
        desc = ""
    if insertion_index is None:
        div = markdown.util.etree.SubElement(root, "div")
    else:
        div = markdown.util.etree.Element("div")
        root.insert(insertion_index, div)
    div.set("class", class_attr)
    anchor = markdown.util.etree.SubElement(div, "a")
    anchor.set("href", link)
    anchor.set("target", "_blank")
    anchor.set("title", title)
    if data_id is not None:
        anchor.set("data-id", data_id)
    img = markdown.util.etree.SubElement(anchor, "img")
    if settings.THUMBNAIL_IMAGES and (not already_thumbnailed) and user_uploads_or_external(url):
        # See docs/thumbnailing.md for some high-level documentation.
        #
        # We strip leading '/' from relative URLs here to ensure
        # consistency in what gets passed to /thumbnail
        quoted_url = urllib.parse.quote(url.lstrip('/'), safe='')
        img.set("src", "/thumbnail?url={0}&size=thumbnail".format(quoted_url))
        img.set('data-src-fullsize', "/thumbnail?url={0}&size=full".format(quoted_url))
    else:
        img.set("src", url)
    if class_attr == "message_inline_ref":
        summary_div = markdown.util.etree.SubElement(div, "div")
        title_div = markdown.util.etree.SubElement(summary_div, "div")
        title_div.set("class", "message_inline_image_title")
        title_div.text = title
        # NOTE(review): the tag name "desc" (rather than "div") and the
        # fact that the `desc` argument is never written into this
        # element look suspicious -- confirm whether this is intended.
        desc_div = markdown.util.etree.SubElement(summary_div, "desc")
        desc_div.set("class", "message_inline_image_desc")
def add_embed(root: Element, link: str, extracted_data: Dict[str, Any]) -> None:
    """Append an embed "card" (background image, linked title, and
    description, as available in `extracted_data`) for `link` to `root`."""
    card = markdown.util.etree.SubElement(root, "div")
    card.set("class", "message_embed")
    img_link = extracted_data.get('image')
    if img_link:
        parsed_img_link = urllib.parse.urlparse(img_link)
        if not parsed_img_link.netloc:
            # Relative image URL: resolve it against the link's domain.
            parsed_url = urllib.parse.urlparse(link)
            domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)
            img_link = urllib.parse.urljoin(domain, img_link)
        # The preview image is an <a> styled with a background-image.
        image_anchor = markdown.util.etree.SubElement(card, "a")
        image_anchor.set("style", "background-image: url(" + img_link + ")")
        image_anchor.set("href", link)
        image_anchor.set("target", "_blank")
        image_anchor.set("class", "message_embed_image")
    text_container = markdown.util.etree.SubElement(card, "div")
    text_container.set("class", "data-container")
    title = extracted_data.get('title')
    if title:
        title_elm = markdown.util.etree.SubElement(text_container, "div")
        title_elm.set("class", "message_embed_title")
        title_anchor = markdown.util.etree.SubElement(title_elm, "a")
        title_anchor.set("href", link)
        title_anchor.set("target", "_blank")
        title_anchor.set("title", title)
        title_anchor.text = title
    description = extracted_data.get('description')
    if description:
        description_elm = markdown.util.etree.SubElement(text_container, "div")
        description_elm.set("class", "message_embed_description")
        description_elm.text = description
@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
    """Fetch tweet metadata from the Twitter API (cached in the database).

    Returns the tweet as a dict, or None (which is also cached) when the
    tweet doesn't exist or the API client is unusable/misconfigured.
    Re-raises on timeouts and rate-limit/capacity errors so the failure
    is NOT cached and rendering can retry later.
    """
    if settings.TEST_SUITE:
        from . import testing_mocks
        res = testing_mocks.twitter(tweet_id)
    else:
        creds = {
            'consumer_key': settings.TWITTER_CONSUMER_KEY,
            'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
            'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
            'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
        }
        # Without a full set of credentials we can't call the API at all.
        if not all(creds.values()):
            return None
        # We lazily import twitter here because its import process is
        # surprisingly slow, and doing so has a significant impact on
        # the startup performance of `manage.py` commands.
        import twitter
        try:
            api = twitter.Api(tweet_mode='extended', **creds)
            # Sometimes Twitter hangs on responses. Timing out here
            # will cause the Tweet to go through as-is with no inline
            # preview, rather than having the message be rejected
            # entirely. This timeout needs to be less than our overall
            # formatting timeout.
            tweet = timeout(3, api.GetStatus, tweet_id)
            res = tweet.AsDict()
        except AttributeError:
            bugdown_logger.error('Unable to load twitter api, you may have the wrong '
                                 'library installed, see https://github.com/zulip/zulip/issues/86')
            return None
        except TimeoutExpired:
            # We'd like to try again later and not cache the bad result,
            # so we need to re-raise the exception (just as though
            # we were being rate-limited)
            raise
        except twitter.TwitterError as e:
            # e.args[0] is typically a list of one error dict -- TODO
            # confirm against python-twitter; the len()==1 checks below
            # guard against other shapes.
            t = e.args[0]
            if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
                # Code 34 means that the message doesn't exist; return
                # None so that we will cache the error
                return None
            elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or
                                                       t[0]['code'] == 130):
                # Code 88 means that we were rate-limited and 130
                # means Twitter is having capacity issues; either way
                # just raise the error so we don't cache None and will
                # try again later.
                raise
            else:
                # It's not clear what to do in cases of other errors,
                # but for now it seems reasonable to log at error
                # level (so that we get notified), but then cache the
                # failure to proceed with our usual work
                bugdown_logger.error(traceback.format_exc())
                return None
    return res
# Matchers for the tag fragments produced by splitting an HTML document
# on '<' in fetch_open_graph_image (each fragment starts with the tag
# name, e.g. 'head>' or 'meta property=...>').
HEAD_START_RE = re.compile('^head[ >]')
HEAD_END_RE = re.compile('^/head[ >]')
META_START_RE = re.compile('^meta[ >]')
META_END_RE = re.compile('^/meta[ >]')
def fetch_open_graph_image(url: str) -> Optional[Dict[str, Any]]:
    """Fetch `url` and extract its Open Graph data from the <head>.

    Returns {'image': ..., 'title': ..., 'desc': ...} where title/desc
    may be None, or None if the page can't be fetched or parsed, or has
    no og:image tag.
    """
    in_head = False
    # HTML will auto close meta tags, when we start the next tag add
    # a closing tag if it has not been closed yet.
    last_closed = True
    head = []
    # TODO: What if response content is huge? Should we get headers first?
    try:
        content = requests.get(url, timeout=1).text
    except Exception:
        return None
    # Extract the head and meta tags
    # All meta tags are self closing, have no children or are closed
    # automatically.
    # We rebuild a minimal, well-formed <head>...</head> document from
    # the raw text so it can be parsed with etree below.
    for part in content.split('<'):
        if not in_head and HEAD_START_RE.match(part):
            # Started the head node output it to have a document root
            in_head = True
            head.append('<head>')
        elif in_head and HEAD_END_RE.match(part):
            # Found the end of the head close any remaining tag then stop
            # processing
            in_head = False
            if not last_closed:
                last_closed = True
                head.append('</meta>')
            head.append('</head>')
            break
        elif in_head and META_START_RE.match(part):
            # Found a meta node copy it
            if not last_closed:
                head.append('</meta>')
                last_closed = True
            head.append('<')
            head.append(part)
            if '/>' not in part:
                last_closed = False
        elif in_head and META_END_RE.match(part):
            # End of a meta node just copy it to close the tag
            head.append('<')
            head.append(part)
            last_closed = True
    try:
        doc = etree.fromstring(''.join(head))
    except etree.ParseError:
        return None
    og_image = doc.find('meta[@property="og:image"]')
    og_title = doc.find('meta[@property="og:title"]')
    og_desc = doc.find('meta[@property="og:description"]')
    title = None
    desc = None
    if og_image is not None:
        image = og_image.get('content')
    else:
        # No og:image means there is nothing worth previewing.
        return None
    if og_title is not None:
        title = og_title.get('content')
    if og_desc is not None:
        desc = og_desc.get('content')
    return {'image': image, 'title': title, 'desc': desc}
def get_tweet_id(url: str) -> Optional[str]:
    """Return the status id if `url` is a twitter.com tweet URL, else None."""
    parsed_url = urllib.parse.urlparse(url)
    on_twitter = (parsed_url.netloc == 'twitter.com' or
                  parsed_url.netloc.endswith('.twitter.com'))
    if not on_twitter:
        return None
    # In old-style twitter.com/#!/wdaher/status/1231241234-style URLs,
    # the interesting part lives in the fragment instead of the path.
    if parsed_url.path == '/' and len(parsed_url.fragment) > 5:
        to_match = parsed_url.fragment
    else:
        to_match = parsed_url.path
    tweet_id_match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,30})(/photo/[0-9])?/?$', to_match)
    if tweet_id_match is None:
        return None
    return tweet_id_match.group("tweetid")
class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):
    """Rewrite plain-http image sources to go through the camo proxy."""
    def run(self, root: Element) -> None:
        # Collect every <img> element in the rendered tree.
        image_elements = walk_tree(root, lambda e: e if e.tag == "img" else None)
        for image in image_elements:
            url = image.get("src")
            if not url.startswith("http://"):
                # Don't rewrite images on our own site (e.g. emoji) or
                # ones already served over https.
                continue
            image.set("src", get_camo_url(url))
class BacktickPattern(markdown.inlinepatterns.Pattern):
    """ Return a `<code>` element containing the matching text. """
    def __init__(self, pattern: str) -> None:
        markdown.inlinepatterns.Pattern.__init__(self, pattern)
        self.ESCAPED_BSLASH = '%s%s%s' % (markdown.util.STX, ord('\\'), markdown.util.ETX)
        self.tag = 'code'
    def handleMatch(self, m: Match[str]) -> Union[str, Element]:
        if not m.group(4):
            # Not a code span: return the text with escaped backslashes
            # replaced by the placeholder sequence.
            return m.group(2).replace('\\\\', self.ESCAPED_BSLASH)
        el = markdown.util.etree.Element(self.tag)
        # Modified to not strip whitespace
        el.text = markdown.util.AtomicString(m.group(4))
        return el
class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
    """Tree processor that scans a rendered message for URLs and appends
    inline previews: inlined images, Dropbox/Twitter/YouTube/Vimeo
    previews, and generic URL embed cards."""
    # Skip Twitter image sizes taller than this when picking a preview.
    TWITTER_MAX_IMAGE_HEIGHT = 400
    # Maximum number of tweets rendered inline per message.
    TWITTER_MAX_TO_PREVIEW = 3
    # Messages with more than this many URLs get no previews at all.
    INLINE_PREVIEW_LIMIT_PER_MESSAGE = 5
    def __init__(self, md: markdown.Markdown) -> None:
        markdown.treeprocessors.Treeprocessor.__init__(self, md)
    def get_actual_image_url(self, url: str) -> str:
        """Convert known image-preview page URLs to direct image URLs."""
        # Add specific per-site cases to convert image-preview urls to image urls.
        # See https://github.com/zulip/zulip/issues/4658 for more information
        parsed_url = urllib.parse.urlparse(url)
        if (parsed_url.netloc == 'github.com' or parsed_url.netloc.endswith('.github.com')):
            # https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png ->
            # https://raw.githubusercontent.com/zulip/zulip/master/static/images/logo/zulip-icon-128x128.png
            split_path = parsed_url.path.split('/')
            if len(split_path) > 3 and split_path[3] == "blob":
                return urllib.parse.urljoin('https://raw.githubusercontent.com',
                                            '/'.join(split_path[0:3] + split_path[4:]))
        return url
    def is_image(self, url: str) -> bool:
        """True if image previews are enabled and the URL path ends with a
        known image extension."""
        if not self.markdown.image_preview_enabled:
            return False
        parsed_url = urllib.parse.urlparse(url)
        # List from http://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
        # (note "jpeg" has no leading dot; endswith still matches ".jpeg")
        for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp"]:
            if parsed_url.path.lower().endswith(ext):
                return True
        return False
    def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]:
        """For Dropbox share/album URLs, return preview info (keys:
        'image', 'is_image', and possibly 'title'/'desc'); else None."""
        # TODO: The returned Dict could possibly be a TypedDict in future.
        parsed_url = urllib.parse.urlparse(url)
        if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')):
            is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/')
            # Only allow preview Dropbox shared links
            if not (parsed_url.path.startswith('/s/') or
                    parsed_url.path.startswith('/sh/') or
                    is_album):
                return None
            # Try to retrieve open graph protocol info for a preview
            # This might be redundant right now for shared links for images.
            # However, we might want to make use of title and description
            # in the future. If the actual image is too big, we might also
            # want to use the open graph image.
            image_info = fetch_open_graph_image(url)
            is_image = is_album or self.is_image(url)
            # If it is from an album or not an actual image file,
            # just use open graph image.
            if is_album or not is_image:
                # Failed to follow link to find an image preview so
                # use placeholder image and guess filename
                if image_info is None:
                    return None
                image_info["is_image"] = is_image
                return image_info
            # Otherwise, try to retrieve the actual image.
            # This is because open graph image from Dropbox may have padding
            # and gifs do not work.
            # TODO: What if image is huge? Should we get headers first?
            if image_info is None:
                image_info = dict()
            image_info['is_image'] = True
            parsed_url_list = list(parsed_url)
            parsed_url_list[4] = "dl=1"  # Replaces query
            image_info["image"] = urllib.parse.urlunparse(parsed_url_list)
            return image_info
        return None
    def youtube_id(self, url: str) -> Optional[str]:
        """Return the YouTube video id for `url`, or None."""
        if not self.markdown.image_preview_enabled:
            return None
        # Youtube video id extraction regular expression from http://pastebin.com/KyKAFv1s
        # Slightly modified to support URLs of the form youtu.be/<id>
        # If it matches, match.group(2) is the video id.
        schema_re = r'(?:https?://)'
        host_re = r'(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)'
        param_re = r'(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))'
        id_re = r'([0-9A-Za-z_-]+)'
        youtube_re = r'^({schema_re}?{host_re}{param_re}?)?{id_re}(?(1).+)?$'
        youtube_re = youtube_re.format(schema_re=schema_re, host_re=host_re, id_re=id_re, param_re=param_re)
        match = re.match(youtube_re, url)
        if match is None:
            return None
        return match.group(2)
    def youtube_image(self, url: str) -> Optional[str]:
        """Return the thumbnail URL for a YouTube video URL, or None."""
        yt_id = self.youtube_id(url)
        if yt_id is not None:
            return "https://i.ytimg.com/vi/%s/default.jpg" % (yt_id,)
        return None
    def vimeo_id(self, url: str) -> Optional[str]:
        """Return the Vimeo video id for `url`, or None."""
        if not self.markdown.image_preview_enabled:
            return None
        #(http|https)?:\/\/(www\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?)
        # If it matches, match.group('id') is the video id.
        vimeo_re = r'^((http|https)?:\/\/(www\.)?vimeo.com\/' + \
                   r'(?:channels\/(?:\w+\/)?|groups\/' + \
                   r'([^\/]*)\/videos\/|)(\d+)(?:|\/\?))$'
        match = re.match(vimeo_re, url)
        if match is None:
            return None
        return match.group(5)
    def vimeo_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
        """Return a "Vimeo - <title>" label from embed data, or None."""
        title = extracted_data.get("title")
        if title is not None:
            return "Vimeo - {}".format(title)
        return None
    def twitter_text(self, text: str,
                     urls: List[Dict[str, str]],
                     user_mentions: List[Dict[str, Any]],
                     media: List[Dict[str, Any]]) -> Element:
        """
        Use data from the twitter API to turn links, mentions and media into A
        tags. Also convert unicode emojis to images.
        This works by using the urls, user_mentions and media data from
        the twitter API and searching for unicode emojis in the text using
        `unicode_emoji_regex`.
        The first step is finding the locations of the URLs, mentions, media and
        emoji in the text. For each match we build a dictionary with type, the start
        location, end location, the URL to link to, and the text(codepoint and title
        in case of emojis) to be used in the link(image in case of emojis).
        Next we sort the matches by start location. And for each we add the
        text from the end of the last link to the start of the current link to
        the output. The text needs to added to the text attribute of the first
        node (the P tag) or the tail the last link created.
        Finally we add any remaining text to the last node.
        """
        to_process = []  # type: List[Dict[str, Any]]
        # Build dicts for URLs
        for url_data in urls:
            short_url = url_data["url"]
            full_url = url_data["expanded_url"]
            for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
                to_process.append({
                    'type': 'url',
                    'start': match.start(),
                    'end': match.end(),
                    'url': short_url,
                    'text': full_url,
                })
        # Build dicts for mentions
        for user_mention in user_mentions:
            screen_name = user_mention['screen_name']
            mention_string = '@' + screen_name
            for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
                to_process.append({
                    'type': 'mention',
                    'start': match.start(),
                    'end': match.end(),
                    'url': 'https://twitter.com/' + urllib.parse.quote(screen_name),
                    'text': mention_string,
                })
        # Build dicts for media
        for media_item in media:
            short_url = media_item['url']
            expanded_url = media_item['expanded_url']
            for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
                to_process.append({
                    'type': 'media',
                    'start': match.start(),
                    'end': match.end(),
                    'url': short_url,
                    'text': expanded_url,
                })
        # Build dicts for emojis
        for match in re.finditer(unicode_emoji_regex, text, re.IGNORECASE):
            orig_syntax = match.group('syntax')
            codepoint = unicode_emoji_to_codepoint(orig_syntax)
            if codepoint in codepoint_to_name:
                display_string = ':' + codepoint_to_name[codepoint] + ':'
                to_process.append({
                    'type': 'emoji',
                    'start': match.start(),
                    'end': match.end(),
                    'codepoint': codepoint,
                    'title': display_string,
                })
        to_process.sort(key=lambda x: x['start'])
        p = current_node = markdown.util.etree.Element('p')
        def set_text(text: str) -> None:
            """
            Helper to set the text or the tail of the current_node
            """
            if current_node == p:
                current_node.text = text
            else:
                current_node.tail = text
        db_data = self.markdown.zulip_db_data
        current_index = 0
        for item in to_process:
            # The text we want to link starts in already linked text skip it
            if item['start'] < current_index:
                continue
            # Add text from the end of last link to the start of the current
            # link
            set_text(text[current_index:item['start']])
            current_index = item['end']
            if item['type'] != 'emoji':
                current_node = elem = url_to_a(db_data, item['url'], item['text'])
            else:
                current_node = elem = make_emoji(item['codepoint'], item['title'])
            p.append(elem)
        # Add any unused text
        set_text(text[current_index:])
        return p
    def twitter_link(self, url: str) -> Optional[Element]:
        """Build the rendered tweet preview <div> for a tweet URL, or
        None if the URL isn't a tweet or fetching its data failed."""
        tweet_id = get_tweet_id(url)
        if tweet_id is None:
            return None
        try:
            res = fetch_tweet_data(tweet_id)
            if res is None:
                return None
            user = res['user']  # type: Dict[str, Any]
            tweet = markdown.util.etree.Element("div")
            tweet.set("class", "twitter-tweet")
            img_a = markdown.util.etree.SubElement(tweet, 'a')
            img_a.set("href", url)
            img_a.set("target", "_blank")
            profile_img = markdown.util.etree.SubElement(img_a, 'img')
            profile_img.set('class', 'twitter-avatar')
            # For some reason, for, e.g. tweet 285072525413724161,
            # python-twitter does not give us a
            # profile_image_url_https, but instead puts that URL in
            # profile_image_url. So use _https if available, but fall
            # back gracefully.
            image_url = user.get('profile_image_url_https', user['profile_image_url'])
            profile_img.set('src', image_url)
            text = html.unescape(res['full_text'])
            urls = res.get('urls', [])
            user_mentions = res.get('user_mentions', [])
            media = res.get('media', [])  # type: List[Dict[str, Any]]
            p = self.twitter_text(text, urls, user_mentions, media)
            tweet.append(p)
            span = markdown.util.etree.SubElement(tweet, 'span')
            span.text = "- %s (@%s)" % (user['name'], user['screen_name'])
            # Add image previews
            for media_item in media:
                # Only photos have a preview image
                if media_item['type'] != 'photo':
                    continue
                # Find the image size that is smaller than
                # TWITTER_MAX_IMAGE_HEIGHT px tall or the smallest
                size_name_tuples = list(media_item['sizes'].items())
                size_name_tuples.sort(reverse=True,
                                      key=lambda x: x[1]['h'])
                for size_name, size in size_name_tuples:
                    if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT:
                        break
                media_url = '%s:%s' % (media_item['media_url_https'], size_name)
                img_div = markdown.util.etree.SubElement(tweet, 'div')
                img_div.set('class', 'twitter-image')
                img_a = markdown.util.etree.SubElement(img_div, 'a')
                img_a.set('href', media_item['url'])
                img_a.set('target', '_blank')
                img_a.set('title', media_item['url'])
                img = markdown.util.etree.SubElement(img_a, 'img')
                img.set('src', media_url)
            return tweet
        except Exception:
            # We put this in its own try-except because it requires external
            # connectivity. If Twitter flakes out, we don't want to not-render
            # the entire message; we just want to not show the Twitter preview.
            bugdown_logger.warning(traceback.format_exc())
            return None
    def get_url_data(self, e: Element) -> Optional[Tuple[str, str]]:
        """For an <a> element, return (href, link text); else None."""
        if e.tag == "a":
            if e.text is not None:
                return (e.get("href"), e.text)
            return (e.get("href"), e.get("href"))
        return None
    def handle_image_inlining(self, root: Element, found_url: ResultWithFamily) -> None:
        """Insert an inline image preview for `found_url`, removing the
        original bare link when it carries no extra information."""
        grandparent = found_url.family.grandparent
        parent = found_url.family.parent
        ahref_element = found_url.family.child
        (url, text) = found_url.result
        actual_url = self.get_actual_image_url(url)
        # url != text usually implies a named link, which we opt not to remove
        url_eq_text = (url == text)
        if parent.tag == 'li':
            # (self.get_actual_image_url(url) here equals actual_url above)
            add_a(parent, self.get_actual_image_url(url), url, title=text)
            if not parent.text and not ahref_element.tail and url_eq_text:
                parent.remove(ahref_element)
        elif parent.tag == 'p':
            parent_index = None
            for index, uncle in enumerate(grandparent.getchildren()):
                if uncle is parent:
                    parent_index = index
                    break
            if parent_index is not None:
                ins_index = self.find_proper_insertion_index(grandparent, parent, parent_index)
                add_a(grandparent, actual_url, url, title=text, insertion_index=ins_index)
            else:
                # We're not inserting after parent, since parent not found.
                # Append to end of list of grandparent's children as normal
                add_a(grandparent, actual_url, url, title=text)
            # If link is alone in a paragraph, delete paragraph containing it
            if (len(parent.getchildren()) == 1 and
                    (not parent.text or parent.text == "\n") and
                    not ahref_element.tail and
                    url_eq_text):
                grandparent.remove(parent)
        else:
            # If none of the above criteria match, fall back to old behavior
            add_a(root, actual_url, url, title=text)
    def find_proper_insertion_index(self, grandparent: Element, parent: Element,
                                    parent_index_in_grandparent: int) -> int:
        """Return the index in `grandparent` after any inline-image divs
        already emitted for links from the same paragraph `parent`."""
        # If there are several inline images from same paragraph, ensure that
        # they are in correct (and not opposite) order by inserting after last
        # inline image from paragraph 'parent'
        uncles = grandparent.getchildren()
        parent_links = [ele.attrib['href'] for ele in parent.iter(tag="a")]
        insertion_index = parent_index_in_grandparent
        while True:
            insertion_index += 1
            if insertion_index >= len(uncles):
                return insertion_index
            uncle = uncles[insertion_index]
            inline_image_classes = ['message_inline_image', 'message_inline_ref']
            if (
                    uncle.tag != 'div' or
                    'class' not in uncle.keys() or
                    uncle.attrib['class'] not in inline_image_classes
            ):
                return insertion_index
            uncle_link = list(uncle.iter(tag="a"))[0].attrib['href']
            if uncle_link not in parent_links:
                return insertion_index
    def is_absolute_url(self, url: str) -> bool:
        """True when the URL has a network location (i.e. is not relative)."""
        return bool(urllib.parse.urlparse(url).netloc)
    def run(self, root: Element) -> None:
        """Scan the message tree for URLs and append whichever preview
        type (image, Dropbox, tweet, YouTube, Vimeo, embed) applies."""
        # Get all URLs from the blob
        found_urls = walk_tree_with_family(root, self.get_url_data)
        if len(found_urls) == 0 or len(found_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE:
            return
        rendered_tweet_count = 0
        for found_url in found_urls:
            (url, text) = found_url.result
            if not self.is_absolute_url(url):
                if self.is_image(url):
                    self.handle_image_inlining(root, found_url)
                # We don't have a strong use case for doing url preview for relative links.
                continue
            dropbox_image = self.dropbox_image(url)
            if dropbox_image is not None:
                class_attr = "message_inline_ref"
                is_image = dropbox_image["is_image"]
                if is_image:
                    class_attr = "message_inline_image"
                    # Not making use of title and description of images
                add_a(root, dropbox_image['image'], url,
                      title=dropbox_image.get('title', ""),
                      desc=dropbox_image.get('desc', ""),
                      class_attr=class_attr,
                      already_thumbnailed=True)
                continue
            if self.is_image(url):
                self.handle_image_inlining(root, found_url)
                continue
            if get_tweet_id(url) is not None:
                if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:
                    # Only render at most TWITTER_MAX_TO_PREVIEW tweets per message
                    continue
                twitter_data = self.twitter_link(url)
                if twitter_data is None:
                    # This link is not actually a tweet known to twitter
                    continue
                rendered_tweet_count += 1
                div = markdown.util.etree.SubElement(root, "div")
                div.set("class", "inline-preview-twitter")
                div.insert(0, twitter_data)
                continue
            youtube = self.youtube_image(url)
            if youtube is not None:
                yt_id = self.youtube_id(url)
                add_a(root, youtube, url, None, None,
                      "youtube-video message_inline_image",
                      yt_id, already_thumbnailed=True)
                continue
            db_data = self.markdown.zulip_db_data
            # Bots' messages don't get URL embed previews.
            if db_data and db_data['sent_by_bot']:
                continue
            if not self.markdown.url_embed_preview_enabled:
                continue
            try:
                extracted_data = link_preview.link_embed_data_from_cache(url)
            except NotFoundInCache:
                # Record the URL so the embed data gets fetched
                # asynchronously; the preview renders on a later pass.
                self.markdown.zulip_message.links_for_preview.add(url)
                continue
            if extracted_data:
                vm_id = self.vimeo_id(url)
                if vm_id is not None:
                    vimeo_image = extracted_data.get('image')
                    vimeo_title = self.vimeo_title(extracted_data)
                    if vimeo_image is not None:
                        add_a(root, vimeo_image, url, vimeo_title,
                              None, "vimeo-video message_inline_image", vm_id,
                              already_thumbnailed=True)
                    if vimeo_title is not None:
                        found_url.family.child.text = vimeo_title
                else:
                    add_embed(root, url, extracted_data)
class Avatar(markdown.inlinepatterns.Pattern):
    """Render avatar/gravatar syntax as an <img> for the user's avatar."""
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        email_address = match.group('email')
        email = email_address.strip().lower()
        profile_id = None
        db_data = self.markdown.zulip_db_data
        if db_data is not None:
            user_dict = db_data['email_info'].get(email)
            if user_dict is not None:
                profile_id = user_dict['id']
        img = markdown.util.etree.Element('img')
        img.set('class', 'message_body_gravatar')
        # Prefer the stable user id over the raw email when we know it.
        img.set('src', '/avatar/{0}?s=30'.format(profile_id or email))
        img.set('title', email)
        img.set('alt', email)
        return img
def possible_avatar_emails(content: str) -> Set[str]:
    """Collect the (non-empty) email addresses referenced by avatar or
    gravatar syntax in `content`."""
    return {email
            for regex in (AVATAR_REGEX, GRAVATAR_REGEX)
            for email in re.findall(regex, content)
            if email}
# Load the generated emoji lookup tables (name <-> unicode codepoint)
# from the JSON files under static/generated/emoji.
path_to_name_to_codepoint = os.path.join(settings.STATIC_ROOT,
                                         "generated", "emoji", "name_to_codepoint.json")
with open(path_to_name_to_codepoint) as name_to_codepoint_file:
    name_to_codepoint = ujson.load(name_to_codepoint_file)
path_to_codepoint_to_name = os.path.join(settings.STATIC_ROOT,
                                         "generated", "emoji", "codepoint_to_name.json")
with open(path_to_codepoint_to_name) as codepoint_to_name_file:
    codepoint_to_name = ujson.load(codepoint_to_name_file)
# All of our emojis(non ZWJ sequences) belong to one of these unicode blocks:
# \U0001f100-\U0001f1ff - Enclosed Alphanumeric Supplement
# \U0001f200-\U0001f2ff - Enclosed Ideographic Supplement
# \U0001f300-\U0001f5ff - Miscellaneous Symbols and Pictographs
# \U0001f600-\U0001f64f - Emoticons (Emoji)
# \U0001f680-\U0001f6ff - Transport and Map Symbols
# \U0001f900-\U0001f9ff - Supplemental Symbols and Pictographs
# \u2000-\u206f - General Punctuation
# \u2300-\u23ff - Miscellaneous Technical
# \u2400-\u243f - Control Pictures
# \u2440-\u245f - Optical Character Recognition
# \u2460-\u24ff - Enclosed Alphanumerics
# \u2500-\u257f - Box Drawing
# \u2580-\u259f - Block Elements
# \u25a0-\u25ff - Geometric Shapes
# \u2600-\u26ff - Miscellaneous Symbols
# \u2700-\u27bf - Dingbats
# \u2900-\u297f - Supplemental Arrows-B
# \u2b00-\u2bff - Miscellaneous Symbols and Arrows
# \u3000-\u303f - CJK Symbols and Punctuation
# \u3200-\u32ff - Enclosed CJK Letters and Months
# Character class covering those blocks; the match is captured in a
# group named 'syntax' (used by the emoji handlers above/below).
unicode_emoji_regex = '(?P<syntax>['\
    '\U0001F100-\U0001F64F' \
    '\U0001F680-\U0001F6FF' \
    '\U0001F900-\U0001F9FF' \
    '\u2000-\u206F' \
    '\u2300-\u27BF' \
    '\u2900-\u297F' \
    '\u2B00-\u2BFF' \
    '\u3000-\u303F' \
    '\u3200-\u32FF' \
    '])'
# The equivalent JS regex is \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
# \ud83e[\udd00-\uddff]|[\u2000-\u206f]|[\u2300-\u27bf]|[\u2b00-\u2bff]|[\u3000-\u303f]|
# [\u3200-\u32ff]. See below comments for explanation. The JS regex is used by marked.js for
# frontend unicode emoji processing.
# The JS regex \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f] represents U0001f100-\U0001f64f
# The JS regex \ud83d[\ude80-\udeff] represents \U0001f680-\U0001f6ff
# The JS regex \ud83e[\udd00-\uddff] represents \U0001f900-\U0001f9ff
# The JS regex [\u2000-\u206f] represents \u2000-\u206f
# The JS regex [\u2300-\u27bf] represents \u2300-\u27bf
# Similarly other JS regexes can be mapped to the respective unicode blocks.
# For more information, please refer to the following article:
# http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript
def make_emoji(codepoint: str, display_string: str) -> Element:
    """Build the <span> used to render a unicode emoji."""
    # Emoji tooltips are the name (colons stripped, underscores as spaces).
    title = display_string[1:-1].replace("_", " ")
    span = markdown.util.etree.Element('span')
    for attr, value in (('class', 'emoji emoji-%s' % (codepoint,)),
                        ('title', title),
                        ('role', 'img'),
                        ('aria-label', title)):
        span.set(attr, value)
    span.text = display_string
    return span
def make_realm_emoji(src: str, display_string: str) -> Element:
    """Build the <img> used to render a realm (custom) emoji."""
    attributes = {
        'src': src,
        'class': 'emoji',
        'alt': display_string,
        # Tooltip: name with colons stripped and underscores as spaces.
        'title': display_string[1:-1].replace("_", " "),
    }
    elt = markdown.util.etree.Element('img')
    for key, value in attributes.items():
        elt.set(key, value)
    return elt
def unicode_emoji_to_codepoint(unicode_emoji: str) -> str:
    """Return the lowercase hex codepoint of a single emoji character,
    zero-padded to a minimum of 4 digits (e.g. U+2603 -> '2603',
    U+1F600 -> '1f600')."""
    # '%04x' produces the same lowercase hex digits as hex()[2:] while
    # padding to four digits in one step.  (The original comment said
    # "if the length is less than zero" -- a typo for "less than four".)
    return '%04x' % (ord(unicode_emoji),)
class EmoticonTranslation(markdown.inlinepatterns.Pattern):
    """ Translates emoticons like `:)` into emoji like `:smile:`. """
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        db_data = self.markdown.zulip_db_data
        # Only translate when the sender has the setting enabled.
        if db_data is None:
            return None
        if not db_data['translate_emoticons']:
            return None
        translated = translate_emoticons(match.group('emoticon'))
        emoji_name = translated[1:-1]
        return make_emoji(name_to_codepoint[emoji_name], translated)
class UnicodeEmoji(markdown.inlinepatterns.Pattern):
    """Render a literal unicode emoji character as an emoji <span>."""
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        codepoint = unicode_emoji_to_codepoint(match.group('syntax'))
        name = codepoint_to_name.get(codepoint)
        if name is None:
            # Not an emoji we know about; leave the character alone.
            return None
        return make_emoji(codepoint, ':' + name + ':')
class Emoji(markdown.inlinepatterns.Pattern):
    """Render :emoji_name: syntax, preferring realm emoji, then the
    special 'zulip' emoji, then unicode emoji names."""
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        orig_syntax = match.group("syntax")
        name = orig_syntax[1:-1]
        db_data = self.markdown.zulip_db_data
        active_realm_emoji = {}  # type: Dict[str, Dict[str, str]]
        if db_data is not None:
            active_realm_emoji = db_data['active_realm_emoji']
        if self.markdown.zulip_message and name in active_realm_emoji:
            return make_realm_emoji(active_realm_emoji[name]['source_url'], orig_syntax)
        if name == 'zulip':
            return make_realm_emoji('/static/generated/emoji/images/emoji/unicode/zulip.png', orig_syntax)
        if name in name_to_codepoint:
            return make_emoji(name_to_codepoint[name], orig_syntax)
        return None
def content_has_emoji_syntax(content: str) -> bool:
    """True if the message content contains any emoji syntax."""
    return bool(re.search(EMOJI_REGEX, content))
class ModalLink(markdown.inlinepatterns.Pattern):
    """
    A pattern that allows including in-app modal links in messages.
    """
    def handleMatch(self, match: Match[str]) -> Element:
        a_tag = markdown.util.etree.Element("a")
        relative_url = match.group('relative_url')
        # The relative URL serves as both target and tooltip.
        a_tag.set("href", relative_url)
        a_tag.set("title", relative_url)
        a_tag.text = match.group('text')
        return a_tag
class Tex(markdown.inlinepatterns.Pattern):
    """Render inline TeX, falling back to showing the raw source with an
    error class when rendering fails."""
    def handleMatch(self, match: Match[str]) -> Element:
        body = match.group('body')
        rendered = render_tex(body, is_inline=True)
        if rendered is None:
            # Something went wrong while rendering; show the raw TeX.
            span = markdown.util.etree.Element('span')
            span.set('class', 'tex-error')
            span.text = '$$' + body + '$$'
            return span
        return etree.fromstring(rendered.encode('utf-8'))
# Matches uploaded-file URLs of the form
# [scheme://host]/user_uploads/<id>[/...]/<hash>/<filename>.
upload_title_re = re.compile("^(https?://[^/]*)?(/user_uploads/\\d+)(/[^/]*)?/[^/]*/(?P<filename>[^/]*)$")
def url_filename(url: str) -> str:
    """Extract the filename if a URL is an uploaded file, or return the original URL"""
    match = upload_title_re.match(url)
    return match.group('filename') if match else url
def fixup_link(link: markdown.util.etree.Element, target_blank: bool=True) -> None:
    """Set certain attributes we want on every link."""
    # Attribute order matters for serialized output: target first, then
    # title, matching the historical behavior.
    if target_blank:
        link.set('target', '_blank')
    title = url_filename(link.get('href'))
    link.set('title', title)
def sanitize_url(url: str) -> Optional[str]:
"""
Sanitize a url against xss attacks.
See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.
"""
try:
parts = urllib.parse.urlparse(url.replace(' ', '%20'))
scheme, netloc, path, params, query, fragment = parts
except ValueError:
# Bad url - so bad it couldn't be parsed.
return ''
# If there is no scheme or netloc and there is a '@' in the path,
# treat it as a mailto: and set the appropriate scheme
if scheme == '' and netloc == '' and '@' in path:
scheme = 'mailto'
elif scheme == '' and netloc == '' and len(path) > 0 and path[0] == '/':
# Allow domain-relative links
return urllib.parse.urlunparse(('', '', path, params, query, fragment))
elif (scheme, netloc, path, params, query) == ('', '', '', '', '') and len(fragment) > 0:
# Allow fragment links
return urllib.parse.urlunparse(('', '', '', '', '', fragment))
# Zulip modification: If scheme is not specified, assume http://
# We re-enter sanitize_url because netloc etc. need to be re-parsed.
if not scheme:
return sanitize_url('http://' + url)
locless_schemes = ['mailto', 'news', 'file', 'bitcoin']
if netloc == '' and scheme not in locless_schemes:
# This fails regardless of anything else.
# Return immediately to save additional processing
return None
# Upstream code will accept a URL like javascript://foo because it
# appears to have a netloc. Additionally there are plenty of other
# schemes that do weird things like launch external programs. To be
# on the safe side, we whitelist the scheme.
if scheme not in ('http', 'https', 'ftp', 'mailto', 'file', 'bitcoin'):
return None
# Upstream code scans path, parameters, and query for colon characters
# because
#
# some aliases [for javascript:] will appear to urllib.parse to have
# no scheme. On top of that relative links (i.e.: "foo/bar.html")
# have no scheme.
#
# We already converted an empty scheme to http:// above, so we skip
# the colon check, which would also forbid a lot of legitimate URLs.
# Url passes all tests. Return url as-is.
return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
def url_to_a(db_data: Optional[DbData], url: str, text: Optional[str]=None) -> Union[Element, str]:
    """Build an <a> element for url, or return the raw URL text if it fails sanitization."""
    clean_href = sanitize_url(url)
    if clean_href is None:
        # Rejected by sanitize_url; render it as plain text.
        return url

    if text is None:
        text = markdown.util.AtomicString(url)

    clean_href = rewrite_local_links_to_relative(db_data, clean_href)
    anchor = markdown.util.etree.Element('a')
    anchor.set('href', clean_href)
    anchor.text = text
    # In-app narrows and mailto: links should not open a new tab.
    open_in_new_tab = not (clean_href.startswith("#narrow") or clean_href.startswith('mailto:'))
    fixup_link(anchor, open_in_new_tab)
    return anchor
class CompiledPattern(markdown.inlinepatterns.Pattern):
    """An inline pattern built from an already-compiled regex.

    Mirrors the tiny __init__ of the superclass, but skips the
    compilation step and lets the caller supply a compiled regex.
    """

    def __init__(self, compiled_re: Pattern, md: markdown.Markdown) -> None:
        self.md = md
        self.compiled_re = compiled_re
class AutoLink(CompiledPattern):
    """Turns bare URLs appearing in message text into links."""

    def handleMatch(self, match: Match[str]) -> ElementStringNone:
        return url_to_a(self.markdown.zulip_db_data, match.group('url'))
class UListProcessor(markdown.blockprocessors.UListProcessor):
    """ Process unordered list blocks.

        Based on markdown.blockprocessors.UListProcessor, but does not accept
        '+' or '-' as a bullet character."""

    TAG = 'ul'
    # Only '*' starts a bullet; '+'/'-' are too common in chat text.
    RE = re.compile('^[ ]{0,3}[*][ ]+(.*)')

    def __init__(self, parser: Any) -> None:
        # HACK: Set the tab length to 2 just for the initialization of
        # this class, so that bulleted lists (and only bulleted lists)
        # work off 2-space indentation.
        parser.markdown.tab_length = 2
        super().__init__(parser)
        # Restore the normal tab length for everything else.
        parser.markdown.tab_length = 4
class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
    """ Process unordered list blocks.

        Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent
    """

    def __init__(self, parser: Any) -> None:
        # HACK: Set the tab length to 2 just for the initialization of
        # this class, so that bulleted lists (and only bulleted lists)
        # work off 2-space indentation.
        parser.markdown.tab_length = 2
        super().__init__(parser)
        # Restore the normal tab length for everything else.
        parser.markdown.tab_length = 4
class BlockQuoteProcessor(markdown.blockprocessors.BlockQuoteProcessor):
    """ Process BlockQuotes.

        Based on markdown.blockprocessors.BlockQuoteProcessor, but with 2-space indent
    """

    # Original regex for blockquote is RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
    RE = re.compile(r'(^|\n)(?!(?:[ ]{0,3}>\s*(?:$|\n))*(?:$|\n))'
                    r'[ ]{0,3}>[ ]?(.*)')
    mention_re = re.compile(mention.find_mentions)

    def clean(self, line: str) -> str:
        # Prefix mentions with '_' so they are silenced inside the quote,
        # then let the upstream processor strip the leading '>'.
        quieted = self.mention_re.sub(lambda m: "@_" + m.group('match'), line)
        return super().clean(quieted)
class BugdownUListPreprocessor(markdown.preprocessors.Preprocessor):
    """ Allows unordered list blocks that come directly after a
        paragraph to be rendered as an unordered list

        Detects paragraphs that have a matching list item that comes
        directly after a line of text, and inserts a newline between
        to satisfy Markdown"""

    LI_RE = re.compile('^[ ]{0,3}[*][ ]+(.*)', re.MULTILINE)
    HANGING_ULIST_RE = re.compile('^.+\\n([ ]{0,3}[*][ ]+.*)', re.MULTILINE)

    def run(self, lines: List[str]) -> List[str]:
        """ Insert a newline between a paragraph and ulist if missing """
        fence = None
        result = lines[:]
        added = 0
        for i in range(len(lines) - 1):
            # Track whether we are inside a fenced code block; list
            # detection is disabled there.
            fence_match = FENCE_RE.match(lines[i])
            if fence_match:
                if not fence:
                    fence = fence_match.group('fence')
                elif fence == fence_match.group('fence'):
                    fence = None

            # Outside fences, insert a blank line between a paragraph
            # line and an immediately following list item.
            if (not fence and lines[i] and
                    self.LI_RE.match(lines[i + 1]) and
                    not self.LI_RE.match(lines[i])):
                result.insert(i + added + 1, '')
                added += 1
        return result
class AutoNumberOListPreprocessor(markdown.preprocessors.Preprocessor):
    """ Finds a sequence of lines numbered by the same number"""

    RE = re.compile(r'^([ ]*)(\d+)\.[ ]+(.*)')
    TAB_LENGTH = 2

    def run(self, lines: List[str]) -> List[str]:
        output = []  # type: List[str]
        pending = []  # type: List[Match[str]]
        pending_indent = 0
        for line in lines:
            m = self.RE.match(line)

            # Does this line continue the list we are accumulating?
            continues_list = bool(m and pending and
                                  pending_indent == len(m.group(1)) // self.TAB_LENGTH)

            if not continues_list:
                # Flush any list we were accumulating.
                output.extend(self.renumber(pending))
                pending = []

            if m is None:
                # Ordinary line
                output.append(line)
            elif continues_list:
                # Another item of the current list
                pending.append(m)
            else:
                # First item of a new list
                pending = [m]
                pending_indent = len(m.group(1)) // self.TAB_LENGTH

        output.extend(self.renumber(pending))
        return output

    def renumber(self, mlist: List[Match[str]]) -> List[str]:
        if not mlist:
            return []

        first = int(mlist[0].group(2))
        # Only renumber when every item carries the same number.
        renumber_all = all(int(m.group(2)) == first for m in mlist)

        result = []  # type: List[str]
        for offset, m in enumerate(mlist):
            label = str(first + offset) if renumber_all else m.group(2)
            result.append('%s%s. %s' % (m.group(1), label, m.group(3)))
        return result
# We need the following since upgrade from py-markdown 2.6.11 to 3.0.1
# modifies the link handling significantly. The following is taken from
# py-markdown 2.6.11 markdown/inlinepatterns.py.
@one_time
def get_link_re() -> Pattern:
    '''
    Return the compiled regex that matches inline [text](url) links.

    Very important--if you need to change this code to depend on
    any arguments, you must eliminate the "one_time" decorator
    and consider performance implications. We only want to compute
    this value once.
    '''
    NOBRACKET = r'[^\]\[]*'
    BRK = (
        r'\[(' +
        (NOBRACKET + r'(\[')*6 +
        (NOBRACKET + r'\])*')*6 +
        NOBRACKET + r')\]'
    )
    NOIMG = r'(?<!\!)'
    # [text](url) or [text](<url>) or [text](url "title")
    LINK_RE = NOIMG + BRK + \
        r'''\(\s*(<(?:[^<>\\]|\\.)*>|(\([^()]*\)|[^()])*?)\s*(('(?:[^'\\]|\\.)*'|"(?:[^"\\]|\\.)*")\s*)?\)'''
    # NOTE: despite the old annotation, this returns a compiled pattern.
    return normal_compile(LINK_RE)
def prepare_realm_pattern(source: str) -> str:
    """ Augment a realm filter so it only matches after start-of-string,
    whitespace, or opening delimiters, won't match if there are word
    characters directly after, and saves what was matched as "name". """
    prefix = r"""(?<![^\s'"\(,:<])(?P<name>"""
    suffix = r')(?!\w)'
    return prefix + source + suffix
# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
class RealmFilterPattern(markdown.inlinepatterns.Pattern):
    """ Applied a given realm filter to the input """

    def __init__(self, source_pattern: str,
                 format_string: str,
                 markdown_instance: Optional[markdown.Markdown]=None) -> None:
        self.pattern = prepare_realm_pattern(source_pattern)
        self.format_string = format_string
        markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)

    def handleMatch(self, m: Match[str]) -> Union[Element, str]:
        # Expand the realm filter's URL template with the named groups
        # captured by the pattern.
        return url_to_a(self.markdown.zulip_db_data,
                        self.format_string % m.groupdict(),
                        m.group("name"))
class UserMentionPattern(markdown.inlinepatterns.Pattern):
    """Renders @**name** mentions (and @_**name** silent mentions)."""

    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        match = m.group('match')
        silent = m.group('silent') == '_'

        db_data = self.markdown.zulip_db_data
        if not self.markdown.zulip_message or db_data is None:
            return None

        if not (match.startswith("**") and match.endswith("**")):
            return None
        name = match[2:-2]

        wildcard = mention.user_mention_matches_wildcard(name)

        # "name|123" syntax pins the mention to a specific user id.
        id_syntax_match = re.match(r'.+\|(?P<user_id>\d+)$', name)
        if id_syntax_match:
            user = db_data['mention_data'].get_user_by_id(id_syntax_match.group("user_id"))
        else:
            user = db_data['mention_data'].get_user_by_name(name)

        if wildcard:
            self.markdown.zulip_message.mentions_wildcard = True
            user_id = "*"
        elif user:
            # Silent mentions don't trigger notifications for the user.
            if not silent:
                self.markdown.zulip_message.mentions_user_ids.add(user['id'])
            name = user['full_name']
            user_id = str(user['id'])
        else:
            # Don't highlight @mentions that don't refer to a valid user
            return None

        el = markdown.util.etree.Element("span")
        el.set('data-user-id', user_id)
        if silent:
            el.set('class', 'user-mention silent')
            el.text = "%s" % (name,)
        else:
            el.set('class', 'user-mention')
            el.text = "@%s" % (name,)
        return el
class UserGroupMentionPattern(markdown.inlinepatterns.Pattern):
    """Renders @*group-name* user-group mentions."""

    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        db_data = self.markdown.zulip_db_data
        if not self.markdown.zulip_message or db_data is None:
            return None

        name = extract_user_group(m.group(2))
        user_group = db_data['mention_data'].get_user_group(name)
        if not user_group:
            # Don't highlight @-mentions that don't refer to a valid user
            # group.
            return None

        self.markdown.zulip_message.mentions_user_group_ids.add(user_group.id)
        el = markdown.util.etree.Element("span")
        el.set('class', 'user-group-mention')
        el.set('data-user-group-id', str(user_group.id))
        el.text = "@%s" % (user_group.name,)
        return el
class StreamPattern(CompiledPattern):
    """Renders #**stream-name** syntax as a link to the stream's narrow."""

    def find_stream_by_name(self, name: str) -> Optional[Dict[str, Any]]:
        # Fixed annotation: callers pass the matched stream-name string
        # (from m.group('stream_name')), not a Match object.
        db_data = self.markdown.zulip_db_data
        if db_data is None:
            return None
        stream = db_data['stream_names'].get(name)
        return stream

    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        name = m.group('stream_name')

        if self.markdown.zulip_message:
            stream = self.find_stream_by_name(name)
            if stream is None:
                # Unknown stream; leave the text unrendered.
                return None
            el = markdown.util.etree.Element('a')
            el.set('class', 'stream')
            el.set('data-stream-id', str(stream['id']))
            # TODO: We should quite possibly not be specifying the
            # href here and instead having the browser auto-add the
            # href when it processes a message with one of these, to
            # provide more clarity to API clients.
            stream_url = encode_stream(stream['id'], name)
            el.set('href', '/#narrow/stream/{stream_url}'.format(stream_url=stream_url))
            el.text = '#{stream_name}'.format(stream_name=name)
            return el
        return None
def possible_linked_stream_names(content: str) -> Set[str]:
    """Return the candidate stream names that appear in stream-link syntax."""
    return set(re.findall(STREAM_LINK_REGEX, content, re.VERBOSE))
class AlertWordsNotificationProcessor(markdown.preprocessors.Preprocessor):
    """Records which of the realm's alert words appear in the message."""

    def run(self, lines: Iterable[str]) -> Iterable[str]:
        db_data = self.markdown.zulip_db_data
        if self.markdown.zulip_message and db_data is not None:
            # We check for alert words here, the set of which are
            # dependent on which users may see this message.
            #
            # Our caller passes in the list of possible_words. We
            # don't do any special rendering; we just append the alert words
            # we find to the set self.markdown.zulip_message.alert_words.
            realm_words = db_data['possible_words']
            content = '\n'.join(lines).lower()

            before = "|".join([r'\s', '^', r'[\(\".,\';\[\*`>]'])
            after = "|".join([r'\s', '$', r'[\)\"\?:.,\';\]!\*`]'])
            for word in realm_words:
                word_re = re.compile('(?:%s)%s(?:%s)' %
                                     (before, re.escape(word.lower()), after))
                if word_re.search(content):
                    self.markdown.zulip_message.alert_words.add(word)
        return lines
# This prevents realm_filters from running on the content of a
# Markdown link, breaking up the link. This is a monkey-patch, but it
# might be worth sending a version of this change upstream.
class AtomicLinkPattern(CompiledPattern):
    """Handles [text](url) links, marking the text atomic so realm filters skip it."""

    def get_element(self, m: Match[str]) -> Optional[Element]:
        href = m.group(9)
        if not href:
            return None

        if href.startswith("<"):
            # Strip the optional <...> wrapper around link targets.
            href = href[1:-1]
        href = sanitize_url(self.unescape(href.strip()))
        if href is None:
            return None

        href = rewrite_local_links_to_relative(self.markdown.zulip_db_data, href)
        link = markdown.util.etree.Element('a')
        link.text = m.group(2)
        link.set('href', href)
        # Only in-page anchors stay in the current tab.
        fixup_link(link, target_blank=(href[:1] != '#'))
        return link

    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        element = self.get_element(m)
        if element is None:
            return None
        if not isinstance(element, str):
            # Prevent realm filters from re-processing the link text.
            element.text = markdown.util.AtomicString(element.text)
        return element
def get_sub_registry(r: markdown.util.Registry, keys: List[str]) -> markdown.util.Registry:
    """Build a new Registry containing only the named entries of ``r``.

    Registry is a new class added by py-markdown to replace Ordered List.
    Since Registry doesn't support .keys(), it is easier to make a new
    object instead of removing keys from the existing object.
    """
    sub = markdown.util.Registry()
    for key in keys:
        sub.register(r[key], key, r.get_index_for_name(key))
    return sub
# These are used as keys ("realm_filters_keys") to md_engines and the respective
# realm filter caches
# Engine key used when no realm is known (e.g. rendering documentation).
DEFAULT_BUGDOWN_KEY = -1
# Engine key for the stripped-down engine used for mirrored Zephyr traffic.
ZEPHYR_MIRROR_BUGDOWN_KEY = -2
class Bugdown(markdown.Markdown):
    """Zulip's customized python-markdown engine.

    Replaces most upstream processors with Zulip-specific ones
    (mentions, emoji, stream links, realm filters, etc.) and disables
    upstream features (raw HTML, references, headers) that are insecure
    or don't make sense in a chat context.
    """

    def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None:
        # define default configs
        self.config = {
            "realm_filters": [kwargs['realm_filters'],
                              "Realm-specific filters for realm_filters_key %s" % (kwargs['realm'],)],
            "realm": [kwargs['realm'], "Realm id"],
            "code_block_processor_disabled": [kwargs['code_block_processor_disabled'],
                                              "Disabled for email gateway"]
        }

        super().__init__(*args, **kwargs)
        self.set_output_format('html')

    def build_parser(self) -> markdown.Markdown:
        """Assemble our processor pipeline; called by markdown.Markdown.__init__."""
        # Build the parser using selected default features from py-markdown.
        # The complete list of all available processors can be found in the
        # super().build_parser() function.
        #
        # Note: for any py-markdown updates, manually check if we want any
        # of the new features added upstream or not; they wouldn't get
        # included by default.
        self.preprocessors = self.build_preprocessors()
        self.parser = self.build_block_parser()
        self.inlinePatterns = self.build_inlinepatterns()
        self.treeprocessors = self.build_treeprocessors()
        self.postprocessors = self.build_postprocessors()
        self.handle_zephyr_mirror()
        return self

    def build_preprocessors(self) -> markdown.util.Registry:
        """Register our preprocessors (higher priority runs earlier)."""
        # We disable the following preprocessors from upstream:
        #
        # html_block - insecure
        # reference - references don't make sense in a chat context.
        preprocessors = markdown.util.Registry()
        preprocessors.register(AutoNumberOListPreprocessor(self), 'auto_number_olist', 40)
        preprocessors.register(BugdownUListPreprocessor(self), 'hanging_ulists', 35)
        preprocessors.register(markdown.preprocessors.NormalizeWhitespace(self), 'normalize_whitespace', 30)
        preprocessors.register(fenced_code.FencedBlockPreprocessor(self), 'fenced_code_block', 25)
        preprocessors.register(AlertWordsNotificationProcessor(self), 'custom_text_notifications', 20)
        return preprocessors

    def build_block_parser(self) -> markdown.util.Registry:
        """Register our block-level processors."""
        # We disable the following blockparsers from upstream:
        #
        # indent - replaced by ours
        # hashheader - disabled, since headers look bad and don't make sense in a chat context.
        # setextheader - disabled, since headers look bad and don't make sense in a chat context.
        # olist - replaced by ours
        # ulist - replaced by ours
        # quote - replaced by ours
        parser = markdown.blockprocessors.BlockParser(self)
        parser.blockprocessors.register(markdown.blockprocessors.EmptyBlockProcessor(parser), 'empty', 85)
        if not self.getConfig('code_block_processor_disabled'):
            parser.blockprocessors.register(markdown.blockprocessors.CodeBlockProcessor(parser), 'code', 80)
        # We get priority 75 from 'table' extension
        parser.blockprocessors.register(markdown.blockprocessors.HRProcessor(parser), 'hr', 70)
        parser.blockprocessors.register(UListProcessor(parser), 'ulist', 65)
        parser.blockprocessors.register(ListIndentProcessor(parser), 'indent', 60)
        parser.blockprocessors.register(BlockQuoteProcessor(parser), 'quote', 55)
        parser.blockprocessors.register(markdown.blockprocessors.ParagraphProcessor(parser), 'paragraph', 50)
        return parser

    def build_inlinepatterns(self) -> markdown.util.Registry:
        """Register our inline patterns; priorities leave room for realm filters."""
        # We disable the following upstream inline patterns:
        #
        # backtick - replaced by ours
        # escape - probably will re-add at some point.
        # link - replaced by ours
        # image_link - replaced by ours
        # autolink - replaced by ours
        # automail - replaced by ours
        # linebreak - we use nl2br and consider that good enough
        # html - insecure
        # reference - references not useful
        # image_reference - references not useful
        # short_reference - references not useful
        # ---------------------------------------------------
        # strong_em - for these three patterns,
        # strong2 - we have our own versions where
        # emphasis2 - we disable _ for bold and emphasis

        # Declare regexes for clean single line calls to .register().
        NOT_STRONG_RE = markdown.inlinepatterns.NOT_STRONG_RE
        # Custom strikethrough syntax: ~~foo~~
        DEL_RE = r'(?<!~)(\~\~)([^~\n]+?)(\~\~)(?!~)'
        # Custom bold syntax: **foo** but not __foo__
        # str inside ** must start and end with a word character
        # it need for things like "const char *x = (char *)y"
        EMPHASIS_RE = r'(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*'
        ENTITY_RE = markdown.inlinepatterns.ENTITY_RE
        STRONG_EM_RE = r'(\*\*\*)(?!\s+)([^\*^\n]+)(?<!\s)\*\*\*'
        # Inline code block without whitespace stripping
        BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\3(?!`))'

        # Add Inline Patterns. We use a custom numbering of the
        # rules, that preserves the order from upstream but leaves
        # space for us to add our own.
        reg = markdown.util.Registry()
        reg.register(BacktickPattern(BACKTICK_RE), 'backtick', 105)
        reg.register(markdown.inlinepatterns.DoubleTagPattern(STRONG_EM_RE, 'strong,em'), 'strong_em', 100)
        reg.register(UserMentionPattern(mention.find_mentions, self), 'usermention', 95)
        reg.register(Tex(r'\B(?<!\$)\$\$(?P<body>[^\n_$](\\\$|[^$\n])*)\$\$(?!\$)\B'), 'tex', 90)
        reg.register(StreamPattern(get_compiled_stream_link_regex(), self), 'stream', 85)
        reg.register(Avatar(AVATAR_REGEX, self), 'avatar', 80)
        reg.register(ModalLink(r'!modal_link\((?P<relative_url>[^)]*), (?P<text>[^)]*)\)'), 'modal_link', 75)
        # Note that !gravatar syntax should be deprecated long term.
        reg.register(Avatar(GRAVATAR_REGEX, self), 'gravatar', 70)
        reg.register(UserGroupMentionPattern(mention.user_group_mentions, self), 'usergroupmention', 65)
        reg.register(AtomicLinkPattern(get_link_re(), self), 'link', 60)
        reg.register(AutoLink(get_web_link_regex(), self), 'autolink', 55)
        # Reserve priority 45-54 for Realm Filters
        reg = self.register_realm_filters(reg)
        reg.register(markdown.inlinepatterns.HtmlInlineProcessor(ENTITY_RE, self), 'entity', 40)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'), 'strong', 35)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(EMPHASIS_RE, 'em'), 'emphasis', 30)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(DEL_RE, 'del'), 'del', 25)
        reg.register(markdown.inlinepatterns.SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 20)
        reg.register(Emoji(EMOJI_REGEX, self), 'emoji', 15)
        reg.register(EmoticonTranslation(emoticon_regex, self), 'translate_emoticons', 10)
        # We get priority 5 from 'nl2br' extension
        reg.register(UnicodeEmoji(unicode_emoji_regex), 'unicodeemoji', 0)
        return reg

    def register_realm_filters(self, inlinePatterns: markdown.util.Registry) -> markdown.util.Registry:
        """Register one linkifier pattern per configured realm filter."""
        for (pattern, format_string, id) in self.getConfig("realm_filters"):
            inlinePatterns.register(RealmFilterPattern(pattern, format_string, self),
                                    'realm_filters/%s' % (pattern), 45)
        return inlinePatterns

    def build_treeprocessors(self) -> markdown.util.Registry:
        """Register our tree processors."""
        # Here we build all the processors from upstream, plus a few of our own.
        treeprocessors = markdown.util.Registry()
        # We get priority 30 from 'hilite' extension
        treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
        treeprocessors.register(markdown.treeprocessors.PrettifyTreeprocessor(self), 'prettify', 20)
        treeprocessors.register(InlineInterestingLinkProcessor(self), 'inline_interesting_links', 15)
        if settings.CAMO_URI:
            treeprocessors.register(InlineHttpsProcessor(self), 'rewrite_to_https', 10)
        return treeprocessors

    def build_postprocessors(self) -> markdown.util.Registry:
        """Register postprocessors."""
        # These are the default python-markdown processors, unmodified.
        postprocessors = markdown.util.Registry()
        postprocessors.register(markdown.postprocessors.RawHtmlPostprocessor(self), 'raw_html', 20)
        postprocessors.register(markdown.postprocessors.AndSubstitutePostprocessor(), 'amp_substitute', 15)
        postprocessors.register(markdown.postprocessors.UnescapePostprocessor(), 'unescape', 10)
        return postprocessors

    def getConfig(self, key: str, default: str='') -> Any:
        """ Return a setting for the given key or an empty string. """
        if key in self.config:
            return self.config[key][0]
        else:
            return default

    def handle_zephyr_mirror(self) -> None:
        """Strip the pipeline down to a minimal set for mirrored Zephyr traffic."""
        if self.getConfig("realm") == ZEPHYR_MIRROR_BUGDOWN_KEY:
            # Disable almost all inline patterns for zephyr mirror
            # users' traffic that is mirrored. Note that
            # inline_interesting_links is a treeprocessor and thus is
            # not removed
            self.inlinePatterns = get_sub_registry(self.inlinePatterns, ['autolink'])
            self.treeprocessors = get_sub_registry(self.treeprocessors, ['inline_interesting_links',
                                                                         'rewrite_to_https'])
            # insert new 'inline' processor because we have changed self.inlinePatterns
            # but InlineProcessor copies md as self.md in __init__.
            self.treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
            self.preprocessors = get_sub_registry(self.preprocessors, ['custom_text_notifications'])
            self.parser.blockprocessors = get_sub_registry(self.parser.blockprocessors, ['paragraph'])
# Cache of built Markdown engines, keyed by (realm_filters_key, email_gateway).
md_engines = {}  # type: Dict[Tuple[int, bool], markdown.Markdown]
# Last-seen realm filter definitions per key, used to detect changes.
realm_filter_data = {}  # type: Dict[int, List[Tuple[str, str, int]]]
def make_md_engine(realm_filters_key: int, email_gateway: bool) -> None:
    """(Re)build and cache the Markdown engine for this (realm, gateway) pair."""
    md_engine_key = (realm_filters_key, email_gateway)
    # Drop any stale engine before rebuilding.
    md_engines.pop(md_engine_key, None)

    md_engines[md_engine_key] = build_engine(
        realm_filters=realm_filter_data[realm_filters_key],
        realm_filters_key=realm_filters_key,
        email_gateway=email_gateway,
    )
def build_engine(realm_filters: List[Tuple[str, str, int]],
                 realm_filters_key: int,
                 email_gateway: bool) -> markdown.Markdown:
    """Construct a Bugdown engine with our standard extension set."""
    extensions = [
        nl2br.makeExtension(),
        tables.makeExtension(),
        codehilite.makeExtension(linenums=False, guess_lang=False),
    ]
    return Bugdown(
        realm_filters=realm_filters,
        realm=realm_filters_key,
        code_block_processor_disabled=email_gateway,
        extensions=extensions,
    )
def topic_links(realm_filters_key: int, topic_name: str) -> List[str]:
    """Return the URLs produced by applying the realm's filters to a topic name."""
    matches = []  # type: List[str]
    for regex, url_format, _filter_id in realm_filters_for_realm(realm_filters_key):
        pattern = prepare_realm_pattern(regex)
        matches.extend(url_format % m.groupdict()
                       for m in re.finditer(pattern, topic_name))
    return matches
def maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gateway: bool) -> None:
    """Ensure md_engines has an up-to-date engine for the given key.

    Rebuilds engines whose realm filter definitions have changed since
    they were built.
    """
    # If realm_filters_key is None, load all filters
    global realm_filter_data
    if realm_filters_key is None:
        all_filters = all_realm_filters()
        all_filters[DEFAULT_BUGDOWN_KEY] = []
        for realm_filters_key, filters in all_filters.items():
            realm_filter_data[realm_filters_key] = filters
            make_md_engine(realm_filters_key, email_gateway)
        # Hack to ensure that getConfig("realm") is right for mirrored Zephyrs
        realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = []
        make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False)
    else:
        realm_filters = realm_filters_for_realm(realm_filters_key)
        if realm_filters_key not in realm_filter_data or \
                realm_filter_data[realm_filters_key] != realm_filters:
            # Realm filters data has changed, update `realm_filter_data` and any
            # of the existing markdown engines using this set of realm filters.
            realm_filter_data[realm_filters_key] = realm_filters
            for email_gateway_flag in [True, False]:
                if (realm_filters_key, email_gateway_flag) in md_engines:
                    # Update only existing engines(if any), don't create new one.
                    make_md_engine(realm_filters_key, email_gateway_flag)

        if (realm_filters_key, email_gateway) not in md_engines:
            # Markdown engine corresponding to this key doesn't exists so create one.
            make_md_engine(realm_filters_key, email_gateway)
# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons. The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
_privacy_re = re.compile('\\w', flags=re.UNICODE)

def privacy_clean_markdown(content: str) -> str:
    """Return a loggable version of content with all word characters masked."""
    masked = _privacy_re.sub('x', content)
    return repr(masked)
def log_bugdown_error(msg: str) -> None:
    """We use this unusual logging approach to log the bugdown error, in
    order to prevent AdminNotifyHandler from sending the sanitized
    original markdown formatting into another Zulip message, which
    could cause an infinite exception loop."""
    bugdown_logger.error(msg)
def get_email_info(realm_id: int, emails: Set[str]) -> Dict[str, FullNameInfo]:
    """Map lowercased email -> {'id', 'email'} rows for matching users in the realm."""
    if not emails:
        return dict()

    q_list = {
        Q(email__iexact=email.strip().lower())
        for email in emails
    }
    # OR together all the per-email case-insensitive filters.
    combined_q = functools.reduce(lambda lhs, rhs: lhs | rhs, q_list)

    rows = UserProfile.objects.filter(
        realm_id=realm_id
    ).filter(
        combined_q,
    ).values(
        'id',
        'email',
    )
    return {row['email'].strip().lower(): row for row in rows}
def get_possible_mentions_info(realm_id: int, mention_texts: Set[str]) -> List[FullNameInfo]:
    """Fetch id/full_name/email rows for active users matching the mention texts.

    Mention texts may use either plain ``name`` or ``name|id`` syntax.
    """
    if not mention_texts:
        return list()

    # Remove the trailing part of the `name|id` mention syntax,
    # thus storing only full names in full_names.
    name_re = r'(?P<full_name>.+)\|\d+$'
    full_names = set()
    for mention_text in mention_texts:
        stripped = re.match(name_re, mention_text)
        full_names.add(stripped.group("full_name") if stripped else mention_text)

    q_list = {
        Q(full_name__iexact=full_name)
        for full_name in full_names
    }
    combined_q = functools.reduce(lambda lhs, rhs: lhs | rhs, q_list)

    rows = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True,
    ).filter(
        combined_q,
    ).values(
        'id',
        'full_name',
        'email',
    )
    return list(rows)
class MentionData:
    """Pre-fetched lookup tables for users/groups possibly mentioned in a message.

    Parses the content for candidate mentions up front, so rendering can
    resolve each mention without per-mention database queries.
    """

    def __init__(self, realm_id: int, content: str) -> None:
        mention_texts = possible_mentions(content)
        possible_mentions_info = get_possible_mentions_info(realm_id, mention_texts)
        # Lookup by lowercased full name (ambiguous if names collide).
        self.full_name_info = {
            row['full_name'].lower(): row
            for row in possible_mentions_info
        }
        # Lookup by user id (unambiguous).
        self.user_id_info = {
            row['id']: row
            for row in possible_mentions_info
        }
        self.init_user_group_data(realm_id=realm_id, content=content)

    def init_user_group_data(self,
                             realm_id: int,
                             content: str) -> None:
        """Pre-fetch mentioned user groups and their memberships."""
        user_group_names = possible_user_group_mentions(content)
        self.user_group_name_info = get_user_group_name_info(realm_id, user_group_names)
        self.user_group_members = defaultdict(list)  # type: Dict[int, List[int]]
        group_ids = [group.id for group in self.user_group_name_info.values()]

        if not group_ids:
            # Early-return to avoid the cost of hitting the ORM,
            # which shows up in profiles.
            return

        membership = UserGroupMembership.objects.filter(user_group_id__in=group_ids)
        for info in membership.values('user_group_id', 'user_profile_id'):
            group_id = info['user_group_id']
            user_profile_id = info['user_profile_id']
            self.user_group_members[group_id].append(user_profile_id)

    def get_user_by_name(self, name: str) -> Optional[FullNameInfo]:
        # warning: get_user_by_name is not dependable if two
        # users of the same full name are mentioned. Use
        # get_user_by_id where possible.
        return self.full_name_info.get(name.lower(), None)

    def get_user_by_id(self, id: str) -> Optional[FullNameInfo]:
        """Look up a user row by (string) user id."""
        return self.user_id_info.get(int(id), None)

    def get_user_ids(self) -> Set[int]:
        """
        Returns the user IDs that might have been mentioned by this
        content.  Note that because this data structure has not parsed
        the message and does not know about escaping/code blocks, this
        will overestimate the list of user ids.
        """
        return set(self.user_id_info.keys())

    def get_user_group(self, name: str) -> Optional[UserGroup]:
        """Look up a mentioned user group by (case-insensitive) name."""
        return self.user_group_name_info.get(name.lower(), None)

    def get_group_members(self, user_group_id: int) -> List[int]:
        """Return the member user ids of a pre-fetched group (empty if unknown)."""
        return self.user_group_members.get(user_group_id, [])
def get_user_group_name_info(realm_id: int, user_group_names: Set[str]) -> Dict[str, UserGroup]:
    """Map lowercased group name -> UserGroup for the named groups in the realm."""
    if not user_group_names:
        return dict()

    groups = UserGroup.objects.filter(realm_id=realm_id,
                                      name__in=user_group_names)
    return {group.name.lower(): group for group in groups}
def get_stream_name_info(realm: Realm, stream_names: Set[str]) -> Dict[str, FullNameInfo]:
    """Map stream name -> {'id', 'name'} rows for active streams in the realm."""
    if not stream_names:
        return dict()

    q_list = {
        Q(name=name)
        for name in stream_names
    }
    combined_q = functools.reduce(lambda lhs, rhs: lhs | rhs, q_list)

    rows = get_active_streams(
        realm=realm,
    ).filter(
        combined_q,
    ).values(
        'id',
        'name',
    )
    return {row['name']: row for row in rows}
def do_convert(content: str,
               message: Optional[Message]=None,
               message_realm: Optional[Realm]=None,
               possible_words: Optional[Set[str]]=None,
               sent_by_bot: Optional[bool]=False,
               translate_emoticons: Optional[bool]=False,
               mention_data: Optional[MentionData]=None,
               email_gateway: Optional[bool]=False,
               no_previews: Optional[bool]=False) -> str:
    """Convert Markdown to HTML, with Zulip-specific settings and hacks.

    Raises BugdownRenderingException on timeout (>5s), on any parser
    exception, or when the rendered output exceeds the size cap.
    """
    # This logic is a bit convoluted, but the overall goal is to support a range of use cases:
    # * Nothing is passed in other than content -> just run default options (e.g. for docs)
    # * message is passed, but no realm is -> look up realm from message
    # * message_realm is passed -> use that realm for bugdown purposes
    if message is not None:
        if message_realm is None:
            message_realm = message.get_realm()
    # Pick which per-realm engine (realm filters) to render with.
    if message_realm is None:
        realm_filters_key = DEFAULT_BUGDOWN_KEY
    else:
        realm_filters_key = message_realm.id

    # Identifier used only for log/error messages below.
    if message and hasattr(message, 'id') and message.id:
        logging_message_id = 'id# ' + str(message.id)
    else:
        logging_message_id = 'unknown'

    if message is not None and message_realm is not None:
        if message_realm.is_zephyr_mirror_realm:
            if message.sending_client.name == "zephyr_mirror":
                # Use slightly customized Markdown processor for content
                # delivered via zephyr_mirror
                realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY

    maybe_update_markdown_engines(realm_filters_key, email_gateway)
    # Engines are cached keyed by (realm_filters_key, email_gateway).
    md_engine_key = (realm_filters_key, email_gateway)
    if md_engine_key in md_engines:
        _md_engine = md_engines[md_engine_key]
    else:
        # NOTE(review): md_engines is keyed by (key, email_gateway)
        # tuples, so this bare-key membership test looks like it can
        # never be True; confirm intent before relying on it.
        if DEFAULT_BUGDOWN_KEY not in md_engines:
            maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False)
        _md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)]
    # Reset the parser; otherwise it will get slower over time.
    _md_engine.reset()

    # Filters such as UserMentionPattern need a message.
    _md_engine.zulip_message = message
    _md_engine.zulip_realm = message_realm
    _md_engine.zulip_db_data = None  # for now
    _md_engine.image_preview_enabled = image_preview_enabled(
        message, message_realm, no_previews)
    _md_engine.url_embed_preview_enabled = url_embed_preview_enabled(
        message, message_realm, no_previews)

    # Pre-fetch data from the DB that is used in the bugdown thread
    if message is not None:
        assert message_realm is not None  # ensured above if message is not None
        if possible_words is None:
            possible_words = set()  # Set[str]

        # Here we fetch the data structures needed to render
        # mentions/avatars/stream mentions from the database, but only
        # if there is syntax in the message that might use them, since
        # the fetches are somewhat expensive and these types of syntax
        # are uncommon enough that it's a useful optimization.

        if mention_data is None:
            mention_data = MentionData(message_realm.id, content)

        emails = possible_avatar_emails(content)
        email_info = get_email_info(message_realm.id, emails)

        stream_names = possible_linked_stream_names(content)
        stream_name_info = get_stream_name_info(message_realm, stream_names)

        if content_has_emoji_syntax(content):
            active_realm_emoji = message_realm.get_active_emoji()
        else:
            active_realm_emoji = dict()

        # Everything the rendering thread needs, bundled so the engine
        # never touches the database itself.
        _md_engine.zulip_db_data = {
            'possible_words': possible_words,
            'email_info': email_info,
            'mention_data': mention_data,
            'active_realm_emoji': active_realm_emoji,
            'realm_uri': message_realm.uri,
            'sent_by_bot': sent_by_bot,
            'stream_names': stream_name_info,
            'translate_emoticons': translate_emoticons,
        }

    try:
        # Spend at most 5 seconds rendering; this protects the backend
        # from being overloaded by bugs (e.g. markdown logic that is
        # extremely inefficient in corner cases) as well as user
        # errors (e.g. a realm filter that makes some syntax
        # infinite-loop).
        rendered_content = timeout(5, _md_engine.convert, content)

        # Throw an exception if the content is huge; this protects the
        # rest of the codebase from any bugs where we end up rendering
        # something huge.
        if len(rendered_content) > MAX_MESSAGE_LENGTH * 10:
            raise BugdownRenderingException('Rendered content exceeds %s characters (message %s)' %
                                            (MAX_MESSAGE_LENGTH * 10, logging_message_id))
        return rendered_content
    except Exception:
        cleaned = privacy_clean_markdown(content)
        # NOTE: Don't change this message without also changing the
        # logic in logging_handlers.py or we can create recursive
        # exceptions.
        exception_message = ('Exception in Markdown parser: %sInput (sanitized) was: %s\n (message %s)'
                             % (traceback.format_exc(), cleaned, logging_message_id))
        bugdown_logger.exception(exception_message)
        raise BugdownRenderingException()
    finally:
        # These next three lines are slightly paranoid, since
        # we always set these right before actually using the
        # engine, but better safe then sorry.
        _md_engine.zulip_message = None
        _md_engine.zulip_realm = None
        _md_engine.zulip_db_data = None
# Process-wide counters used by the bugdown_stats_* helpers below to
# profile total time spent rendering Markdown.
bugdown_time_start = 0.0
bugdown_total_time = 0.0
bugdown_total_requests = 0
def get_bugdown_time() -> float:
    """Return total seconds spent rendering Markdown in this process."""
    return bugdown_total_time
def get_bugdown_requests() -> int:
    """Return how many Markdown render requests this process has served."""
    return bugdown_total_requests
def bugdown_stats_start() -> None:
    """Mark the start time of a render; paired with bugdown_stats_finish."""
    global bugdown_time_start
    bugdown_time_start = time.time()
def bugdown_stats_finish() -> None:
    """Accumulate elapsed time since bugdown_stats_start into the totals."""
    global bugdown_total_time
    global bugdown_total_requests
    global bugdown_time_start
    bugdown_total_requests += 1
    bugdown_total_time += (time.time() - bugdown_time_start)
def convert(content: str,
            message: Optional[Message]=None,
            message_realm: Optional[Realm]=None,
            possible_words: Optional[Set[str]]=None,
            sent_by_bot: Optional[bool]=False,
            translate_emoticons: Optional[bool]=False,
            mention_data: Optional[MentionData]=None,
            email_gateway: Optional[bool]=False,
            no_previews: Optional[bool]=False) -> str:
    """Render Markdown to HTML, recording timing stats around the call."""
    bugdown_stats_start()
    rendered = do_convert(
        content,
        message=message,
        message_realm=message_realm,
        possible_words=possible_words,
        sent_by_bot=sent_by_bot,
        translate_emoticons=translate_emoticons,
        mention_data=mention_data,
        email_gateway=email_gateway,
        no_previews=no_previews,
    )
    bugdown_stats_finish()
    return rendered
| ./CrossVul/dataset_final_sorted/CWE-400/py/good_1090_0 |
crossvul-python_data_bad_4092_3 | import logging
import warnings
from base64 import b32encode
from binascii import unhexlify
import django_otp
import qrcode
import qrcode.image.svg
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.views import SuccessURLAllowedHostsMixin
from django.contrib.sites.shortcuts import get_current_site
from django.forms import Form
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, resolve_url
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url
from django.utils.module_loading import import_string
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import DeleteView, FormView, TemplateView
from django.views.generic.base import View
from django_otp.decorators import otp_required
from django_otp.plugins.otp_static.models import StaticDevice, StaticToken
from two_factor import signals
from two_factor.models import get_available_methods, random_hex_str
from two_factor.utils import totp_digits
from ..forms import (
AuthenticationTokenForm, BackupTokenForm, DeviceValidationForm, MethodForm,
PhoneNumberForm, PhoneNumberMethodForm, TOTPDeviceForm, YubiKeyDeviceForm,
)
from ..models import PhoneDevice, get_available_phone_methods
from ..utils import backup_phones, default_device, get_otpauth_url
from .utils import IdempotentSessionWizardView, class_view_decorator
try:
from otp_yubikey.models import ValidationService, RemoteYubikeyDevice
except ImportError:
ValidationService = RemoteYubikeyDevice = None
logger = logging.getLogger(__name__)
@class_view_decorator(sensitive_post_parameters())
@class_view_decorator(never_cache)
class LoginView(SuccessURLAllowedHostsMixin, IdempotentSessionWizardView):
    """
    View for handling the login process, including OTP verification.

    The login process is composed like a wizard. The first step asks for the
    user's credentials. If the credentials are correct, the wizard proceeds to
    the OTP verification step. If the user has a default OTP device configured,
    that device is asked to generate a token (send sms / call phone) and the
    user is asked to provide the generated token. The backup devices are also
    listed, allowing the user to select a backup device for verification.
    """
    template_name = 'two_factor/core/login.html'
    # Wizard steps: credentials, then primary-device token, then backup token.
    form_list = (
        ('auth', AuthenticationForm),
        ('token', AuthenticationTokenForm),
        ('backup', BackupTokenForm),
    )
    # Token forms must not be re-validated (a token is single-use).
    idempotent_dict = {
        'token': False,
        'backup': False,
    }
    redirect_authenticated_user = False

    def has_token_step(self):
        # The token step only applies if the user has a confirmed OTP device.
        return default_device(self.get_user())

    def has_backup_step(self):
        # Offer the backup step only while the token step hasn't passed yet.
        return default_device(self.get_user()) and \
            'token' not in self.storage.validated_step_data

    condition_dict = {
        'token': has_token_step,
        'backup': has_backup_step,
    }
    redirect_field_name = REDIRECT_FIELD_NAME

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Per-request caches to avoid re-validating the auth form and
        # re-resolving the challenge device.
        self.user_cache = None
        self.device_cache = None

    def post(self, *args, **kwargs):
        """
        The user can select a particular device to challenge, being the backup
        devices added to the account.
        """
        # Generating a challenge doesn't require to validate the form.
        if 'challenge_device' in self.request.POST:
            return self.render_goto_step('token')
        return super().post(*args, **kwargs)

    def done(self, form_list, **kwargs):
        """
        Login the user and redirect to the desired page.
        """
        login(self.request, self.get_user())

        redirect_to = self.get_success_url()

        # django_otp sets user.otp_device once a token was verified.
        device = getattr(self.get_user(), 'otp_device', None)
        if device:
            signals.user_verified.send(sender=__name__, request=self.request,
                                       user=self.get_user(), device=device)
        return redirect(redirect_to)

    # Copied from django.conrib.auth.views.LoginView (Branch: stable/1.11.x)
    # https://github.com/django/django/blob/58df8aa40fe88f753ba79e091a52f236246260b3/django/contrib/auth/views.py#L63
    def get_success_url(self):
        url = self.get_redirect_url()
        return url or resolve_url(settings.LOGIN_REDIRECT_URL)

    # Copied from django.conrib.auth.views.LoginView (Branch: stable/1.11.x)
    # https://github.com/django/django/blob/58df8aa40fe88f753ba79e091a52f236246260b3/django/contrib/auth/views.py#L67
    def get_redirect_url(self):
        """Return the user-originating redirect URL if it's safe."""
        redirect_to = self.request.POST.get(
            self.redirect_field_name,
            self.request.GET.get(self.redirect_field_name, '')
        )
        url_is_safe = is_safe_url(
            url=redirect_to,
            allowed_hosts=self.get_success_url_allowed_hosts(),
            require_https=self.request.is_secure(),
        )
        return redirect_to if url_is_safe else ''

    def get_form_kwargs(self, step=None):
        """
        AuthenticationTokenForm requires the user kwarg.
        """
        if step == 'auth':
            return {
                'request': self.request
            }
        if step in ('token', 'backup'):
            return {
                'user': self.get_user(),
                'initial_device': self.get_device(step),
            }
        return {}

    def get_device(self, step=None):
        """
        Returns the OTP device selected by the user, or his default device.
        """
        if not self.device_cache:
            challenge_device_id = self.request.POST.get('challenge_device', None)
            if challenge_device_id:
                # The user explicitly picked one of their backup phones.
                for device in backup_phones(self.get_user()):
                    if device.persistent_id == challenge_device_id:
                        self.device_cache = device
                        break
            if step == 'backup':
                try:
                    self.device_cache = self.get_user().staticdevice_set.get(name='backup')
                except StaticDevice.DoesNotExist:
                    pass
            if not self.device_cache:
                self.device_cache = default_device(self.get_user())
        return self.device_cache

    def render(self, form=None, **kwargs):
        """
        If the user selected a device, ask the device to generate a challenge;
        either making a phone call or sending a text message.
        """
        if self.steps.current == 'token':
            self.get_device().generate_challenge()
        return super().render(form, **kwargs)

    def get_user(self):
        """
        Returns the user authenticated by the AuthenticationForm. Returns False
        if not a valid user; see also issue #65.

        NOTE(review): this re-validates the stored 'auth' step data, which
        means the wizard keeps the submitted credentials (including the
        password) in session storage between steps — confirm whether this
        plaintext persistence is acceptable for the deployment.
        """
        if not self.user_cache:
            form_obj = self.get_form(step='auth',
                                     data=self.storage.get_step_data('auth'))
            self.user_cache = form_obj.is_valid() and form_obj.user_cache
        return self.user_cache

    def get_context_data(self, form, **kwargs):
        """
        Adds user's default and backup OTP devices to the context.
        """
        context = super().get_context_data(form, **kwargs)
        if self.steps.current == 'token':
            context['device'] = self.get_device()
            context['other_devices'] = [
                phone for phone in backup_phones(self.get_user())
                if phone != self.get_device()]
            try:
                context['backup_tokens'] = self.get_user().staticdevice_set\
                    .get(name='backup').token_set.count()
            except StaticDevice.DoesNotExist:
                context['backup_tokens'] = 0

        if getattr(settings, 'LOGOUT_REDIRECT_URL', None):
            context['cancel_url'] = resolve_url(settings.LOGOUT_REDIRECT_URL)
        elif getattr(settings, 'LOGOUT_URL', None):
            warnings.warn(
                "LOGOUT_URL has been replaced by LOGOUT_REDIRECT_URL, please "
                "review the URL and update your settings.",
                DeprecationWarning)
            context['cancel_url'] = resolve_url(settings.LOGOUT_URL)
        return context

    # Copied from django.conrib.auth.views.LoginView (Branch: stable/1.11.x)
    # https://github.com/django/django/blob/58df8aa40fe88f753ba79e091a52f236246260b3/django/contrib/auth/views.py#L49
    @method_decorator(sensitive_post_parameters())
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        if self.redirect_authenticated_user and self.request.user.is_authenticated:
            redirect_to = self.get_success_url()
            if redirect_to == self.request.path:
                raise ValueError(
                    "Redirection loop for authenticated user detected. Check that "
                    "your LOGIN_REDIRECT_URL doesn't point to a login page."
                )
            return HttpResponseRedirect(redirect_to)
        return super().dispatch(request, *args, **kwargs)
@class_view_decorator(never_cache)
@class_view_decorator(login_required)
class SetupView(IdempotentSessionWizardView):
    """
    View for handling OTP setup using a wizard.

    The first step of the wizard shows an introduction text, explaining how OTP
    works and why it should be enabled. The user has to select the verification
    method (generator / call / sms) in the second step. Depending on the method
    selected, the third step configures the device. For the generator method, a
    QR code is shown which can be scanned using a mobile phone app and the user
    is asked to provide a generated token. For call and sms methods, the user
    provides the phone number which is then validated in the final step.
    """
    success_url = 'two_factor:setup_complete'
    qrcode_url = 'two_factor:qr'
    template_name = 'two_factor/core/setup.html'
    # Session key under which the base32 TOTP secret is exposed to QRGeneratorView.
    session_key_name = 'django_two_factor-qr_secret_key'
    initial_dict = {}
    form_list = (
        ('welcome', Form),
        ('method', MethodForm),
        ('generator', TOTPDeviceForm),
        ('sms', PhoneNumberForm),
        ('call', PhoneNumberForm),
        ('validation', DeviceValidationForm),
        ('yubikey', YubiKeyDeviceForm),
    )
    # Each device-specific step only appears for its chosen method.
    condition_dict = {
        'generator': lambda self: self.get_method() == 'generator',
        'call': lambda self: self.get_method() == 'call',
        'sms': lambda self: self.get_method() == 'sms',
        'validation': lambda self: self.get_method() in ('sms', 'call'),
        'yubikey': lambda self: self.get_method() == 'yubikey',
    }
    idempotent_dict = {
        'yubikey': False,
    }

    def get_method(self):
        # The chosen method lives in the validated data of the 'method' step.
        method_data = self.storage.validated_step_data.get('method', {})
        return method_data.get('method', None)

    def get(self, request, *args, **kwargs):
        """
        Start the setup wizard. Redirect if already enabled.
        """
        if default_device(self.request.user):
            return redirect(self.success_url)
        return super().get(request, *args, **kwargs)

    def get_form_list(self):
        """
        Check if there is only one method, then skip the MethodForm from form_list
        """
        form_list = super().get_form_list()
        available_methods = get_available_methods()
        if len(available_methods) == 1:
            form_list.pop('method', None)
            method_key, _ = available_methods[0]
            # Pretend the user already chose the only available method.
            self.storage.validated_step_data['method'] = {'method': method_key}
        return form_list

    def render_next_step(self, form, **kwargs):
        """
        In the validation step, ask the device to generate a challenge.
        """
        next_step = self.steps.next
        if next_step == 'validation':
            try:
                self.get_device().generate_challenge()
                kwargs["challenge_succeeded"] = True
            except Exception:
                logger.exception("Could not generate challenge")
                kwargs["challenge_succeeded"] = False
        return super().render_next_step(form, **kwargs)

    def done(self, form_list, **kwargs):
        """
        Finish the wizard. Save all forms and redirect.
        """
        # Remove secret key used for QR code generation
        try:
            del self.request.session[self.session_key_name]
        except KeyError:
            pass

        # TOTPDeviceForm
        if self.get_method() == 'generator':
            form = [form for form in form_list if isinstance(form, TOTPDeviceForm)][0]
            device = form.save()

        # PhoneNumberForm / YubiKeyDeviceForm
        elif self.get_method() in ('call', 'sms', 'yubikey'):
            device = self.get_device()
            device.save()

        else:
            raise NotImplementedError("Unknown method '%s'" % self.get_method())

        # Mark the session as OTP-verified with the freshly created device.
        django_otp.login(self.request, device)
        return redirect(self.success_url)

    def get_form_kwargs(self, step=None):
        kwargs = {}
        if step == 'generator':
            kwargs.update({
                'key': self.get_key(step),
                'user': self.request.user,
            })
        if step in ('validation', 'yubikey'):
            kwargs.update({
                'device': self.get_device()
            })
        metadata = self.get_form_metadata(step)
        if metadata:
            kwargs.update({
                'metadata': metadata,
            })
        return kwargs

    def get_device(self, **kwargs):
        """
        Uses the data from the setup step and generated key to recreate device.

        Only used for call / sms -- generator uses other procedure.
        """
        method = self.get_method()
        kwargs = kwargs or {}
        kwargs['name'] = 'default'
        kwargs['user'] = self.request.user

        if method in ('call', 'sms'):
            kwargs['method'] = method
            kwargs['number'] = self.storage.validated_step_data\
                .get(method, {}).get('number')
            return PhoneDevice(key=self.get_key(method), **kwargs)

        if method == 'yubikey':
            # The last 32 chars of a Yubikey token are the OTP; the rest is the public ID.
            kwargs['public_id'] = self.storage.validated_step_data\
                .get('yubikey', {}).get('token', '')[:-32]
            try:
                kwargs['service'] = ValidationService.objects.get(name='default')
            except ValidationService.DoesNotExist:
                raise KeyError("No ValidationService found with name 'default'")
            except ValidationService.MultipleObjectsReturned:
                raise KeyError("Multiple ValidationService found with name 'default'")
            return RemoteYubikeyDevice(**kwargs)

    def get_key(self, step):
        # NOTE(review): the generated device secret is kept in wizard
        # session storage between steps — confirm this plaintext
        # persistence is acceptable (CWE-312 style concern).
        self.storage.extra_data.setdefault('keys', {})
        if step in self.storage.extra_data['keys']:
            return self.storage.extra_data['keys'].get(step)
        key = random_hex_str(20)
        self.storage.extra_data['keys'][step] = key
        return key

    def get_context_data(self, form, **kwargs):
        context = super().get_context_data(form, **kwargs)
        if self.steps.current == 'generator':
            key = self.get_key('generator')
            rawkey = unhexlify(key.encode('ascii'))
            b32key = b32encode(rawkey).decode('utf-8')
            # Hand the secret to QRGeneratorView through the session.
            self.request.session[self.session_key_name] = b32key
            context.update({
                'QR_URL': reverse(self.qrcode_url)
            })
        elif self.steps.current == 'validation':
            context['device'] = self.get_device()
        context['cancel_url'] = resolve_url(settings.LOGIN_REDIRECT_URL)
        return context

    def process_step(self, form):
        # Preserve per-form metadata (e.g. delivery details) across steps.
        if hasattr(form, 'metadata'):
            self.storage.extra_data.setdefault('forms', {})
            self.storage.extra_data['forms'][self.steps.current] = form.metadata
        return super().process_step(form)

    def get_form_metadata(self, step):
        self.storage.extra_data.setdefault('forms', {})
        return self.storage.extra_data['forms'].get(step, None)
@class_view_decorator(never_cache)
@class_view_decorator(otp_required)
class BackupTokensView(FormView):
    """
    View for listing and generating backup tokens.

    A user can generate a number of static backup tokens. When the user loses
    its phone, these backup tokens can be used for verification. These backup
    tokens should be stored in a safe location; either in a safe or underneath
    a pillow ;-).
    """
    form_class = Form
    success_url = 'two_factor:backup_tokens'
    template_name = 'two_factor/core/backup_tokens.html'
    number_of_tokens = 10

    def get_device(self):
        # One StaticDevice named 'backup' per user holds all backup tokens.
        return self.request.user.staticdevice_set.get_or_create(name='backup')[0]

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['device'] = self.get_device()
        return context

    def form_valid(self, form):
        """
        Delete existing backup codes and generate new ones.
        """
        device = self.get_device()
        device.token_set.all().delete()
        for n in range(self.number_of_tokens):
            device.token_set.create(token=StaticToken.random_token())

        return redirect(self.success_url)
@class_view_decorator(never_cache)
@class_view_decorator(otp_required)
class PhoneSetupView(IdempotentSessionWizardView):
    """
    View for configuring a phone number for receiving tokens.

    A user can have multiple backup :class:`~two_factor.models.PhoneDevice`
    for receiving OTP tokens. If the primary phone number is not available, as
    the battery might have drained or the phone is lost, these backup phone
    numbers can be used for verification.
    """
    template_name = 'two_factor/core/phone_register.html'
    success_url = settings.LOGIN_REDIRECT_URL
    form_list = (
        ('setup', PhoneNumberMethodForm),
        ('validation', DeviceValidationForm),
    )
    key_name = 'key'

    def get(self, request, *args, **kwargs):
        """
        Start the setup wizard. Redirect if no phone methods available.
        """
        if not get_available_phone_methods():
            return redirect(self.success_url)
        return super().get(request, *args, **kwargs)

    def done(self, form_list, **kwargs):
        """
        Store the device and redirect to profile page.
        """
        self.get_device(user=self.request.user, name='backup').save()
        return redirect(self.success_url)

    def render_next_step(self, form, **kwargs):
        """
        In the validation step, ask the device to generate a challenge.
        """
        next_step = self.steps.next
        if next_step == 'validation':
            self.get_device().generate_challenge()
        return super().render_next_step(form, **kwargs)

    def get_form_kwargs(self, step=None):
        """
        Provide the device to the DeviceValidationForm.
        """
        if step == 'validation':
            return {'device': self.get_device()}
        return {}

    def get_device(self, **kwargs):
        """
        Uses the data from the setup step and generated key to recreate device.
        """
        kwargs = kwargs or {}
        kwargs.update(self.storage.validated_step_data.get('setup', {}))
        return PhoneDevice(key=self.get_key(), **kwargs)

    def get_key(self):
        """
        The key is preserved between steps and stored as ascii in the session.
        """
        if self.key_name not in self.storage.extra_data:
            key = random_hex_str(20)
            self.storage.extra_data[self.key_name] = key
        return self.storage.extra_data[self.key_name]

    def get_context_data(self, form, **kwargs):
        kwargs.setdefault('cancel_url', resolve_url(self.success_url))
        return super().get_context_data(form, **kwargs)
@class_view_decorator(never_cache)
@class_view_decorator(otp_required)
class PhoneDeleteView(DeleteView):
    """
    View for removing a phone number used for verification.
    """
    success_url = settings.LOGIN_REDIRECT_URL

    def get_queryset(self):
        # Only the user's own backup phones may be deleted, never the default.
        return self.request.user.phonedevice_set.filter(name='backup')

    def get_success_url(self):
        return resolve_url(self.success_url)
@class_view_decorator(never_cache)
@class_view_decorator(otp_required)
class SetupCompleteView(TemplateView):
    """
    View congratulation the user when OTP setup has completed.
    """
    template_name = 'two_factor/core/setup_complete.html'

    def get_context_data(self):
        # Expose which phone methods exist so the template can offer
        # configuring a backup phone.
        return {
            'phone_methods': get_available_phone_methods(),
        }
@class_view_decorator(never_cache)
@class_view_decorator(login_required)
class QRGeneratorView(View):
    """
    View returns an SVG image with the OTP token information
    """
    http_method_names = ['get']
    default_qr_factory = 'qrcode.image.svg.SvgPathImage'
    # Must match SetupView.session_key_name, where the secret is placed.
    session_key_name = 'django_two_factor-qr_secret_key'

    # The qrcode library only supports PNG and SVG for now
    image_content_types = {
        'PNG': 'image/png',
        'SVG': 'image/svg+xml; charset=utf-8',
    }

    def get_issuer(self):
        return get_current_site(self.request).name

    def get(self, request, *args, **kwargs):
        # Get the data from the session
        try:
            key = self.request.session[self.session_key_name]
        except KeyError:
            # No setup in progress -> nothing to render.
            raise Http404()

        # Get data for qrcode
        image_factory_string = getattr(settings, 'TWO_FACTOR_QR_FACTORY', self.default_qr_factory)
        image_factory = import_string(image_factory_string)
        content_type = self.image_content_types[image_factory.kind]
        try:
            username = self.request.user.get_username()
        except AttributeError:
            username = self.request.user.username

        otpauth_url = get_otpauth_url(accountname=username,
                                      issuer=self.get_issuer(),
                                      secret=key,
                                      digits=totp_digits())

        # Make and return QR code
        img = qrcode.make(otpauth_url, image_factory=image_factory)
        resp = HttpResponse(content_type=content_type)
        img.save(resp)
        return resp
| ./CrossVul/dataset_final_sorted/CWE-312/py/bad_4092_3 |
crossvul-python_data_good_4092_4 | import logging
from django.contrib.auth import load_backend
from django.core.exceptions import SuspiciousOperation
from django.utils.decorators import method_decorator
from django.utils.translation import gettext as _
from formtools.wizard.forms import ManagementForm
from formtools.wizard.storage.session import SessionStorage
from formtools.wizard.views import SessionWizardView
logger = logging.getLogger(__name__)
class ExtraSessionStorage(SessionStorage):
    """
    SessionStorage that includes the property `validated_step_data` for storing
    cleaned form data per step.
    """
    validated_step_data_key = 'validated_step_data'

    def init_data(self):
        super().init_data()
        self.data[self.validated_step_data_key] = {}

    def reset(self):
        # Only the base reset touches the session; if our prefix was never
        # written there, just reinitialize in-memory data.
        if self.prefix in self.request.session:
            super().reset()
        else:
            self.init_data()

    def _get_validated_step_data(self):
        return self.data[self.validated_step_data_key]

    def _set_validated_step_data(self, validated_step_data):
        self.data[self.validated_step_data_key] = validated_step_data

    validated_step_data = property(_get_validated_step_data,
                                   _set_validated_step_data)
class LoginStorage(ExtraSessionStorage):
    """
    SessionStorage that includes the property 'authenticated_user' for storing
    backend authenticated users while logging in.

    Only the user's primary key and auth backend dotted path are stored
    (never the submitted credentials).
    """
    def _get_authenticated_user(self):
        # Ensure that both user_pk and user_backend exist in the session
        if not all([self.data.get("user_pk"), self.data.get("user_backend")]):
            return False
        # Acquire the user the same way django.contrib.auth.get_user does
        backend = load_backend(self.data["user_backend"])
        user = backend.get_user(self.data["user_pk"])
        if not user:
            return False
        # Set user.backend to the dotted path version of the backend for login()
        user.backend = self.data["user_backend"]
        return user

    def _set_authenticated_user(self, user):
        # Acquire the PK the same way django's auth middleware does
        self.data["user_pk"] = user._meta.pk.value_to_string(user)
        self.data["user_backend"] = user.backend

    authenticated_user = property(_get_authenticated_user,
                                  _set_authenticated_user)
class IdempotentSessionWizardView(SessionWizardView):
    """
    WizardView that allows certain steps to be marked non-idempotent, in which
    case the form is only validated once and the cleaned values stored.
    """
    storage_name = 'two_factor.views.utils.ExtraSessionStorage'
    # Maps step name -> bool; False means the step's form may be validated
    # only once (e.g. single-use OTP tokens).
    idempotent_dict = {}

    def is_step_visible(self, step):
        """
        Returns whether the given `step` should be included in the wizard; it
        is included if either the form is idempotent or not filled in before.
        """
        return self.idempotent_dict.get(step, True) or \
            step not in self.storage.validated_step_data

    def get_prev_step(self, step=None):
        """
        Returns the previous step before the given `step`. If there are no
        steps available, None will be returned. If the `step` argument is
        None, the current step will be determined automatically.
        """
        if step is None:
            step = self.steps.current
        form_list = self.get_form_list()
        keys = list(form_list.keys())
        key = keys.index(step) - 1
        if key >= 0:
            # Walk backwards, skipping steps hidden by is_step_visible().
            for prev_step in keys[key::-1]:
                if self.is_step_visible(prev_step):
                    return prev_step
        return None

    def get_next_step(self, step=None):
        """
        Returns the next step after the given `step`. If no more steps are
        available, None will be returned. If the `step` argument is None, the
        current step will be determined automatically.
        """
        if step is None:
            step = self.steps.current
        form_list = self.get_form_list()
        keys = list(form_list.keys())
        key = keys.index(step) + 1
        for next_step in keys[key:]:
            if self.is_step_visible(next_step):
                return next_step
        return None

    def post(self, *args, **kwargs):
        """
        Check if the current step is still available. It might not be if
        conditions have changed.
        """
        if self.steps.current not in self.steps.all:
            logger.warning("Current step '%s' is no longer valid, returning "
                           "to last valid step in the wizard.",
                           self.steps.current)
            return self.render_goto_step(self.steps.all[-1])

        # -- Duplicated code from upstream
        # Look for a wizard_goto_step element in the posted data which
        # contains a valid step name. If one was found, render the requested
        # form. (This makes stepping back a lot easier).
        wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
        if wizard_goto_step and wizard_goto_step in self.get_form_list():
            return self.render_goto_step(wizard_goto_step)

        # Check if form was refreshed
        management_form = ManagementForm(self.request.POST, prefix=self.prefix)
        if not management_form.is_valid():
            raise SuspiciousOperation(_('ManagementForm data is missing or has been tampered with'))

        form_current_step = management_form.cleaned_data['current_step']
        if (form_current_step != self.steps.current
                and self.storage.current_step is not None):
            # form refreshed, change current step
            self.storage.current_step = form_current_step
        # -- End duplicated code from upstream

        # This is different from the first check, as this checks
        # if the new step is available. See issue #65.
        if self.steps.current not in self.steps.all:
            logger.warning("Requested step '%s' is no longer valid, returning "
                           "to last valid step in the wizard.",
                           self.steps.current)
            return self.render_goto_step(self.steps.all[-1])

        return super().post(*args, **kwargs)

    def process_step(self, form):
        """
        Stores the validated data for `form` and cleans out validated forms
        for next steps, as those might be affected by the current step. Note
        that this behaviour is relied upon by the `LoginView` to prevent users
        from bypassing the `TokenForm` by going steps back and changing
        credentials.
        """
        step = self.steps.current

        # If the form is not-idempotent (cannot be validated multiple times),
        # the cleaned data should be stored; marking the form as validated.
        self.storage.validated_step_data[step] = form.cleaned_data

        # It is assumed that earlier steps affect later steps; so even though
        # those forms might not be idempotent, we'll remove the validated data
        # to force re-entry.
        # form_list = self.get_form_list(idempotent=False)
        form_list = self.get_form_list()
        keys = list(form_list.keys())
        key = keys.index(step) + 1
        for next_step in keys[key:]:
            self.storage.validated_step_data.pop(next_step, None)

        return super().process_step(form)

    def get_done_form_list(self):
        # Hook so subclasses can control which forms are revalidated in done.
        return self.get_form_list()

    def render_done(self, form, **kwargs):
        """
        This method gets called when all forms passed. The method should also
        re-validate all steps to prevent manipulation. If any form don't
        validate, `render_revalidation_failure` should get called.
        If everything is fine call `done`.
        """
        final_form_list = []
        # walk through the form list and try to validate the data again.
        for form_key in self.get_done_form_list():
            form_obj = self.get_form(step=form_key,
                                     data=self.storage.get_step_data(form_key),
                                     files=self.storage.get_step_files(
                                         form_key))
            # Non-idempotent forms (single-use tokens) are trusted from
            # their earlier validation and not re-validated here.
            if not (form_key in self.idempotent_dict or form_obj.is_valid()):
                return self.render_revalidation_failure(form_key, form_obj,
                                                        **kwargs)
            final_form_list.append(form_obj)

        # render the done view and reset the wizard before returning the
        # response. This is needed to prevent from rendering done with the
        # same data twice.
        done_response = self.done(final_form_list, **kwargs)
        self.storage.reset()
        return done_response
def class_view_decorator(function_decorator):
    """
    Convert a function-based decorator into a class-based-view decorator.

    Can't subclass the `View` as it breaks inheritance (super in particular),
    so we monkey-patch instead.
    From: http://stackoverflow.com/a/8429311/58107
    """
    def _decorate_view(view_class):
        # Wrap dispatch() so the function decorator runs on every request.
        view_class.dispatch = method_decorator(function_decorator)(view_class.dispatch)
        return view_class
    return _decorate_view
| ./CrossVul/dataset_final_sorted/CWE-312/py/good_4092_4 |
crossvul-python_data_bad_4092_4 | import logging
from django.core.exceptions import SuspiciousOperation
from django.utils.decorators import method_decorator
from django.utils.translation import gettext as _
from formtools.wizard.forms import ManagementForm
from formtools.wizard.storage.session import SessionStorage
from formtools.wizard.views import SessionWizardView
logger = logging.getLogger(__name__)
class ExtraSessionStorage(SessionStorage):
    """
    SessionStorage subclass that additionally tracks ``validated_step_data``,
    a mapping of wizard step name to that step's cleaned form data.
    """
    validated_step_data_key = 'validated_step_data'

    def init_data(self):
        super().init_data()
        self.data[self.validated_step_data_key] = {}

    def reset(self):
        # Only defer to the parent reset when a session entry actually
        # exists; otherwise simply (re-)initialise the data structure.
        if self.prefix in self.request.session:
            super().reset()
        else:
            self.init_data()

    @property
    def validated_step_data(self):
        return self.data[self.validated_step_data_key]

    @validated_step_data.setter
    def validated_step_data(self, validated_step_data):
        self.data[self.validated_step_data_key] = validated_step_data
class IdempotentSessionWizardView(SessionWizardView):
    """
    WizardView that allows certain steps to be marked non-idempotent, in which
    case the form is only validated once and the cleaned values stored.
    """
    storage_name = 'two_factor.views.utils.ExtraSessionStorage'
    # Maps step name -> bool; a step mapped to False is non-idempotent
    # (its form must not be validated more than once).
    idempotent_dict = {}

    def is_step_visible(self, step):
        """
        Returns whether the given `step` should be included in the wizard; it
        is included if either the form is idempotent or not filled in before.
        """
        return self.idempotent_dict.get(step, True) or \
            step not in self.storage.validated_step_data

    def get_prev_step(self, step=None):
        """
        Returns the previous step before the given `step`. If there are no
        steps available, None will be returned. If the `step` argument is
        None, the current step will be determined automatically.
        """
        if step is None:
            step = self.steps.current
        form_list = self.get_form_list()
        keys = list(form_list.keys())
        key = keys.index(step) - 1
        if key >= 0:
            # Walk backwards from the preceding step, skipping steps that
            # are hidden (already-validated non-idempotent steps).
            for prev_step in keys[key::-1]:
                if self.is_step_visible(prev_step):
                    return prev_step
        return None

    def get_next_step(self, step=None):
        """
        Returns the next step after the given `step`. If no more steps are
        available, None will be returned. If the `step` argument is None, the
        current step will be determined automatically.
        """
        if step is None:
            step = self.steps.current
        form_list = self.get_form_list()
        keys = list(form_list.keys())
        key = keys.index(step) + 1
        # First visible step after the current one wins.
        for next_step in keys[key:]:
            if self.is_step_visible(next_step):
                return next_step
        return None

    def post(self, *args, **kwargs):
        """
        Check if the current step is still available. It might not be if
        conditions have changed.
        """
        if self.steps.current not in self.steps.all:
            logger.warning("Current step '%s' is no longer valid, returning "
                           "to last valid step in the wizard.",
                           self.steps.current)
            return self.render_goto_step(self.steps.all[-1])
        # -- Duplicated code from upstream
        # Look for a wizard_goto_step element in the posted data which
        # contains a valid step name. If one was found, render the requested
        # form. (This makes stepping back a lot easier).
        wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
        if wizard_goto_step and wizard_goto_step in self.get_form_list():
            return self.render_goto_step(wizard_goto_step)
        # Check if form was refreshed
        management_form = ManagementForm(self.request.POST, prefix=self.prefix)
        if not management_form.is_valid():
            raise SuspiciousOperation(_('ManagementForm data is missing or has been tampered with'))
        form_current_step = management_form.cleaned_data['current_step']
        if (form_current_step != self.steps.current
                and self.storage.current_step is not None):
            # form refreshed, change current step
            self.storage.current_step = form_current_step
        # -- End duplicated code from upstream
        # This is different from the first check, as this checks
        # if the new step is available. See issue #65.
        if self.steps.current not in self.steps.all:
            logger.warning("Requested step '%s' is no longer valid, returning "
                           "to last valid step in the wizard.",
                           self.steps.current)
            return self.render_goto_step(self.steps.all[-1])
        return super().post(*args, **kwargs)

    def process_step(self, form):
        """
        Stores the validated data for `form` and cleans out validated forms
        for next steps, as those might be affected by the current step. Note
        that this behaviour is relied upon by the `LoginView` to prevent users
        from bypassing the `TokenForm` by going steps back and changing
        credentials.
        """
        step = self.steps.current
        # If the form is not-idempotent (cannot be validated multiple times),
        # the cleaned data should be stored; marking the form as validated.
        # NOTE(review): form.cleaned_data is persisted verbatim into the
        # session-backed storage; for steps whose forms contain credentials
        # this stores sensitive data in cleartext -- confirm that subclasses
        # exclude or sanitise such steps before storage.
        self.storage.validated_step_data[step] = form.cleaned_data
        # It is assumed that earlier steps affect later steps; so even though
        # those forms might not be idempotent, we'll remove the validated data
        # to force re-entry.
        # form_list = self.get_form_list(idempotent=False)
        form_list = self.get_form_list()
        keys = list(form_list.keys())
        key = keys.index(step) + 1
        for next_step in keys[key:]:
            self.storage.validated_step_data.pop(next_step, None)
        return super().process_step(form)

    def render_done(self, form, **kwargs):
        """
        This method gets called when all forms passed. The method should also
        re-validate all steps to prevent manipulation. If any form don't
        validate, `render_revalidation_failure` should get called.
        If everything is fine call `done`.
        """
        final_form_list = []
        # walk through the form list and try to validate the data again.
        for form_key in self.get_form_list():
            form_obj = self.get_form(step=form_key,
                                     data=self.storage.get_step_data(form_key),
                                     files=self.storage.get_step_files(
                                         form_key))
            # Non-idempotent steps are exempt from re-validation.
            if not (form_key in self.idempotent_dict or form_obj.is_valid()):
                return self.render_revalidation_failure(form_key, form_obj,
                                                        **kwargs)
            final_form_list.append(form_obj)
        # render the done view and reset the wizard before returning the
        # response. This is needed to prevent from rendering done with the
        # same data twice.
        done_response = self.done(final_form_list, **kwargs)
        self.storage.reset()
        return done_response
def class_view_decorator(function_decorator):
    """
    Adapt a plain function decorator so it can decorate class-based views.

    We avoid subclassing `View` (that breaks `super()` chains) and instead
    monkey-patch the class's `dispatch` method in place.
    From: http://stackoverflow.com/a/8429311/58107
    """
    def simple_decorator(View):
        patched_dispatch = method_decorator(function_decorator)(View.dispatch)
        View.dispatch = patched_dispatch
        return View
    return simple_decorator
| ./CrossVul/dataset_final_sorted/CWE-312/py/bad_4092_4 |
crossvul-python_data_good_4092_3 | import logging
import warnings
from base64 import b32encode
from binascii import unhexlify
import time
import django_otp
import qrcode
import qrcode.image.svg
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.views import SuccessURLAllowedHostsMixin
from django.contrib.sites.shortcuts import get_current_site
from django.forms import Form, ValidationError
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, resolve_url
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.http import is_safe_url
from django.utils.module_loading import import_string
from django.utils.translation import gettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import DeleteView, FormView, TemplateView
from django.views.generic.base import View
from django_otp.decorators import otp_required
from django_otp.plugins.otp_static.models import StaticDevice, StaticToken
from two_factor import signals
from two_factor.models import get_available_methods, random_hex_str
from two_factor.utils import totp_digits
from ..forms import (
AuthenticationTokenForm, BackupTokenForm, DeviceValidationForm, MethodForm,
PhoneNumberForm, PhoneNumberMethodForm, TOTPDeviceForm, YubiKeyDeviceForm,
)
from ..models import PhoneDevice, get_available_phone_methods
from ..utils import backup_phones, default_device, get_otpauth_url
from .utils import IdempotentSessionWizardView, class_view_decorator
# YubiKey support is optional: when django-otp-yubikey is not installed,
# the related classes are set to None and yubikey flows are disabled.
try:
    from otp_yubikey.models import ValidationService, RemoteYubikeyDevice
except ImportError:
    ValidationService = RemoteYubikeyDevice = None

logger = logging.getLogger(__name__)
@class_view_decorator(sensitive_post_parameters())
@class_view_decorator(never_cache)
class LoginView(SuccessURLAllowedHostsMixin, IdempotentSessionWizardView):
    """
    View for handling the login process, including OTP verification.

    The login process is composed like a wizard. The first step asks for the
    user's credentials. If the credentials are correct, the wizard proceeds to
    the OTP verification step. If the user has a default OTP device configured,
    that device is asked to generate a token (send sms / call phone) and the
    user is asked to provide the generated token. The backup devices are also
    listed, allowing the user to select a backup device for verification.
    """
    template_name = 'two_factor/core/login.html'
    form_list = (
        ('auth', AuthenticationForm),
        ('token', AuthenticationTokenForm),
        ('backup', BackupTokenForm),
    )
    # Token forms must not be validated twice (a token is single use).
    idempotent_dict = {
        'token': False,
        'backup': False,
    }
    redirect_authenticated_user = False
    storage_name = 'two_factor.views.utils.LoginStorage'

    def has_token_step(self):
        # The token step only applies when the user has an OTP device set up.
        return default_device(self.get_user())

    def has_backup_step(self):
        # Backup tokens are only offered while the regular token step has
        # not been completed yet.
        return default_device(self.get_user()) and \
            'token' not in self.storage.validated_step_data

    @cached_property
    def expired(self):
        # True when the login flow took longer than TWO_FACTOR_LOGIN_TIMEOUT
        # seconds (default 600); a timeout of 0 disables expiry entirely.
        login_timeout = getattr(settings, 'TWO_FACTOR_LOGIN_TIMEOUT', 600)
        if login_timeout == 0:
            return False
        expiration_time = self.storage.data.get("authentication_time", 0) + login_timeout
        return int(time.time()) > expiration_time

    condition_dict = {
        'token': has_token_step,
        'backup': has_backup_step,
    }
    redirect_field_name = REDIRECT_FIELD_NAME

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Per-request caches; populated lazily by get_user()/get_device().
        self.user_cache = None
        self.device_cache = None
        self.show_timeout_error = False

    def post(self, *args, **kwargs):
        """
        The user can select a particular device to challenge, being the backup
        devices added to the account.
        """
        # Stepping back to the auth step restarts the whole flow.
        wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
        if wizard_goto_step == 'auth':
            self.storage.reset()

        # Before we do anything else, check if the flow has timed out.
        if self.expired and self.steps.current != 'auth':
            logger.info("User's authentication flow has timed out. The user "
                        "has been redirected to the initial auth form.")
            self.storage.reset()
            self.show_timeout_error = True
            return self.render_goto_step('auth')

        # Generating a challenge doesn't require to validate the form.
        if 'challenge_device' in self.request.POST:
            return self.render_goto_step('token')

        return super().post(*args, **kwargs)

    def done(self, form_list, **kwargs):
        """
        Login the user and redirect to the desired page.
        """
        login(self.request, self.get_user())

        redirect_to = self.get_success_url()

        device = getattr(self.get_user(), 'otp_device', None)
        if device:
            signals.user_verified.send(sender=__name__, request=self.request,
                                       user=self.get_user(), device=device)
        return redirect(redirect_to)

    # Copied from django.conrib.auth.views.LoginView (Branch: stable/1.11.x)
    # https://github.com/django/django/blob/58df8aa40fe88f753ba79e091a52f236246260b3/django/contrib/auth/views.py#L63
    def get_success_url(self):
        url = self.get_redirect_url()
        return url or resolve_url(settings.LOGIN_REDIRECT_URL)

    # Copied from django.conrib.auth.views.LoginView (Branch: stable/1.11.x)
    # https://github.com/django/django/blob/58df8aa40fe88f753ba79e091a52f236246260b3/django/contrib/auth/views.py#L67
    def get_redirect_url(self):
        """Return the user-originating redirect URL if it's safe."""
        redirect_to = self.request.POST.get(
            self.redirect_field_name,
            self.request.GET.get(self.redirect_field_name, '')
        )
        # is_safe_url() guards against open-redirect attacks.
        url_is_safe = is_safe_url(
            url=redirect_to,
            allowed_hosts=self.get_success_url_allowed_hosts(),
            require_https=self.request.is_secure(),
        )
        return redirect_to if url_is_safe else ''

    def get_form_kwargs(self, step=None):
        """
        AuthenticationTokenForm requires the user kwarg.
        """
        if step == 'auth':
            return {
                'request': self.request
            }
        if step in ('token', 'backup'):
            return {
                'user': self.get_user(),
                'initial_device': self.get_device(step),
            }
        return {}

    def get_done_form_list(self):
        """
        Return the forms that should be processed during the final step
        """
        # Intentionally do not process the auth form on the final step. We
        # haven't stored this data, and it isn't required to login the user
        form_list = self.get_form_list()
        form_list.pop('auth')
        return form_list

    def process_step(self, form):
        """
        Process an individual step in the flow
        """
        # To prevent saving any private auth data to the session store, we
        # validate the authentication form, determine the resulting user, then
        # only store the minimum needed to login that user (the user's primary
        # key and the backend used)
        if self.steps.current == 'auth':
            user = form.is_valid() and form.user_cache
            self.storage.reset()
            self.storage.authenticated_user = user
            self.storage.data["authentication_time"] = int(time.time())

            # By returning None when the user clicks the "back" button to the
            # auth step the form will be blank with validation warnings
            return None

        return super().process_step(form)

    def process_step_files(self, form):
        """
        Process the files submitted from a specific test
        """
        # Auth form files (there should be none) are never stored either.
        if self.steps.current == 'auth':
            return {}
        return super().process_step_files(form)

    def get_form(self, *args, **kwargs):
        """
        Returns the form for the step
        """
        form = super().get_form(*args, **kwargs)
        if self.show_timeout_error:
            form.cleaned_data = getattr(form, 'cleaned_data', {})
            form.add_error(None, ValidationError(_('Your session has timed out. Please login again.')))
        return form

    def get_device(self, step=None):
        """
        Returns the OTP device selected by the user, or his default device.
        """
        if not self.device_cache:
            challenge_device_id = self.request.POST.get('challenge_device', None)
            if challenge_device_id:
                # The user explicitly picked one of their backup phones.
                for device in backup_phones(self.get_user()):
                    if device.persistent_id == challenge_device_id:
                        self.device_cache = device
                        break
            if step == 'backup':
                try:
                    self.device_cache = self.get_user().staticdevice_set.get(name='backup')
                except StaticDevice.DoesNotExist:
                    pass
            if not self.device_cache:
                self.device_cache = default_device(self.get_user())
        return self.device_cache

    def render(self, form=None, **kwargs):
        """
        If the user selected a device, ask the device to generate a challenge;
        either making a phone call or sending a text message.
        """
        if self.steps.current == 'token':
            self.get_device().generate_challenge()
        return super().render(form, **kwargs)

    def get_user(self):
        """
        Returns the user authenticated by the AuthenticationForm. Returns False
        if not a valid user; see also issue #65.
        """
        if not self.user_cache:
            self.user_cache = self.storage.authenticated_user
        return self.user_cache

    def get_context_data(self, form, **kwargs):
        """
        Adds user's default and backup OTP devices to the context.
        """
        context = super().get_context_data(form, **kwargs)
        if self.steps.current == 'token':
            context['device'] = self.get_device()
            context['other_devices'] = [
                phone for phone in backup_phones(self.get_user())
                if phone != self.get_device()]
            try:
                context['backup_tokens'] = self.get_user().staticdevice_set\
                    .get(name='backup').token_set.count()
            except StaticDevice.DoesNotExist:
                context['backup_tokens'] = 0

        if getattr(settings, 'LOGOUT_REDIRECT_URL', None):
            context['cancel_url'] = resolve_url(settings.LOGOUT_REDIRECT_URL)
        elif getattr(settings, 'LOGOUT_URL', None):
            warnings.warn(
                "LOGOUT_URL has been replaced by LOGOUT_REDIRECT_URL, please "
                "review the URL and update your settings.",
                DeprecationWarning)
            context['cancel_url'] = resolve_url(settings.LOGOUT_URL)
        return context

    # Copied from django.conrib.auth.views.LoginView (Branch: stable/1.11.x)
    # https://github.com/django/django/blob/58df8aa40fe88f753ba79e091a52f236246260b3/django/contrib/auth/views.py#L49
    @method_decorator(sensitive_post_parameters())
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        if self.redirect_authenticated_user and self.request.user.is_authenticated:
            redirect_to = self.get_success_url()
            if redirect_to == self.request.path:
                raise ValueError(
                    "Redirection loop for authenticated user detected. Check that "
                    "your LOGIN_REDIRECT_URL doesn't point to a login page."
                )
            return HttpResponseRedirect(redirect_to)
        return super().dispatch(request, *args, **kwargs)
@class_view_decorator(never_cache)
@class_view_decorator(login_required)
class SetupView(IdempotentSessionWizardView):
    """
    View for handling OTP setup using a wizard.

    The first step of the wizard shows an introduction text, explaining how OTP
    works and why it should be enabled. The user has to select the verification
    method (generator / call / sms) in the second step. Depending on the method
    selected, the third step configures the device. For the generator method, a
    QR code is shown which can be scanned using a mobile phone app and the user
    is asked to provide a generated token. For call and sms methods, the user
    provides the phone number which is then validated in the final step.
    """
    success_url = 'two_factor:setup_complete'
    qrcode_url = 'two_factor:qr'
    template_name = 'two_factor/core/setup.html'
    session_key_name = 'django_two_factor-qr_secret_key'
    initial_dict = {}
    form_list = (
        ('welcome', Form),
        ('method', MethodForm),
        ('generator', TOTPDeviceForm),
        ('sms', PhoneNumberForm),
        ('call', PhoneNumberForm),
        ('validation', DeviceValidationForm),
        ('yubikey', YubiKeyDeviceForm),
    )
    # Only the step matching the chosen method is shown.
    condition_dict = {
        'generator': lambda self: self.get_method() == 'generator',
        'call': lambda self: self.get_method() == 'call',
        'sms': lambda self: self.get_method() == 'sms',
        'validation': lambda self: self.get_method() in ('sms', 'call'),
        'yubikey': lambda self: self.get_method() == 'yubikey',
    }
    # A yubikey token is single use, so that form must not validate twice.
    idempotent_dict = {
        'yubikey': False,
    }

    def get_method(self):
        # The chosen method ('generator'/'call'/'sms'/'yubikey') as stored
        # by the 'method' step; None when no method was chosen yet.
        method_data = self.storage.validated_step_data.get('method', {})
        return method_data.get('method', None)

    def get(self, request, *args, **kwargs):
        """
        Start the setup wizard. Redirect if already enabled.
        """
        if default_device(self.request.user):
            return redirect(self.success_url)
        return super().get(request, *args, **kwargs)

    def get_form_list(self):
        """
        Check if there is only one method, then skip the MethodForm from form_list
        """
        form_list = super().get_form_list()
        available_methods = get_available_methods()
        if len(available_methods) == 1:
            # Pretend the single available method was already selected.
            form_list.pop('method', None)
            method_key, _ = available_methods[0]
            self.storage.validated_step_data['method'] = {'method': method_key}
        return form_list

    def render_next_step(self, form, **kwargs):
        """
        In the validation step, ask the device to generate a challenge.
        """
        next_step = self.steps.next
        if next_step == 'validation':
            try:
                self.get_device().generate_challenge()
                kwargs["challenge_succeeded"] = True
            except Exception:
                # Challenge delivery failed (e.g. SMS gateway error); the
                # template is told so it can display an error.
                logger.exception("Could not generate challenge")
                kwargs["challenge_succeeded"] = False
        return super().render_next_step(form, **kwargs)

    def done(self, form_list, **kwargs):
        """
        Finish the wizard. Save all forms and redirect.
        """
        # Remove secret key used for QR code generation
        try:
            del self.request.session[self.session_key_name]
        except KeyError:
            pass

        # TOTPDeviceForm
        if self.get_method() == 'generator':
            form = [form for form in form_list if isinstance(form, TOTPDeviceForm)][0]
            device = form.save()

        # PhoneNumberForm / YubiKeyDeviceForm
        elif self.get_method() in ('call', 'sms', 'yubikey'):
            device = self.get_device()
            device.save()

        else:
            raise NotImplementedError("Unknown method '%s'" % self.get_method())

        # Mark the new device as verified for this session.
        django_otp.login(self.request, device)
        return redirect(self.success_url)

    def get_form_kwargs(self, step=None):
        kwargs = {}
        if step == 'generator':
            kwargs.update({
                'key': self.get_key(step),
                'user': self.request.user,
            })
        if step in ('validation', 'yubikey'):
            kwargs.update({
                'device': self.get_device()
            })
        metadata = self.get_form_metadata(step)
        if metadata:
            kwargs.update({
                'metadata': metadata,
            })
        return kwargs

    def get_device(self, **kwargs):
        """
        Uses the data from the setup step and generated key to recreate device.

        Only used for call / sms -- generator uses other procedure.
        """
        method = self.get_method()
        kwargs = kwargs or {}
        kwargs['name'] = 'default'
        kwargs['user'] = self.request.user

        if method in ('call', 'sms'):
            kwargs['method'] = method
            kwargs['number'] = self.storage.validated_step_data\
                .get(method, {}).get('number')
            return PhoneDevice(key=self.get_key(method), **kwargs)

        if method == 'yubikey':
            # The last 32 characters of a yubikey token are the OTP part;
            # everything before that is the device's public id.
            kwargs['public_id'] = self.storage.validated_step_data\
                .get('yubikey', {}).get('token', '')[:-32]
            try:
                kwargs['service'] = ValidationService.objects.get(name='default')
            except ValidationService.DoesNotExist:
                raise KeyError("No ValidationService found with name 'default'")
            except ValidationService.MultipleObjectsReturned:
                raise KeyError("Multiple ValidationService found with name 'default'")
            return RemoteYubikeyDevice(**kwargs)

    def get_key(self, step):
        # One random hex key per step, generated on first use and kept in
        # the wizard's extra_data for the rest of the flow.
        self.storage.extra_data.setdefault('keys', {})
        if step in self.storage.extra_data['keys']:
            return self.storage.extra_data['keys'].get(step)
        key = random_hex_str(20)
        self.storage.extra_data['keys'][step] = key
        return key

    def get_context_data(self, form, **kwargs):
        context = super().get_context_data(form, **kwargs)
        if self.steps.current == 'generator':
            key = self.get_key('generator')
            rawkey = unhexlify(key.encode('ascii'))
            b32key = b32encode(rawkey).decode('utf-8')
            # The QR view reads the base32 secret from the session.
            self.request.session[self.session_key_name] = b32key
            context.update({
                'QR_URL': reverse(self.qrcode_url)
            })
        elif self.steps.current == 'validation':
            context['device'] = self.get_device()
        context['cancel_url'] = resolve_url(settings.LOGIN_REDIRECT_URL)
        return context

    def process_step(self, form):
        if hasattr(form, 'metadata'):
            self.storage.extra_data.setdefault('forms', {})
            self.storage.extra_data['forms'][self.steps.current] = form.metadata
        return super().process_step(form)

    def get_form_metadata(self, step):
        self.storage.extra_data.setdefault('forms', {})
        return self.storage.extra_data['forms'].get(step, None)
@class_view_decorator(never_cache)
@class_view_decorator(otp_required)
class BackupTokensView(FormView):
    """
    View for listing and generating static backup tokens.

    When the user's primary token device (e.g. phone) is unavailable, these
    backup tokens can be used for verification instead. They should be kept
    somewhere safe by the user.
    """
    form_class = Form
    success_url = 'two_factor:backup_tokens'
    template_name = 'two_factor/core/backup_tokens.html'
    number_of_tokens = 10

    def get_device(self):
        # All backup tokens live on a single StaticDevice named 'backup',
        # created on first access.
        device, _created = self.request.user.staticdevice_set.get_or_create(name='backup')
        return device

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['device'] = self.get_device()
        return context

    def form_valid(self, form):
        """
        Delete existing backup codes and generate new ones.
        """
        device = self.get_device()
        device.token_set.all().delete()
        for _ in range(self.number_of_tokens):
            device.token_set.create(token=StaticToken.random_token())
        return redirect(self.success_url)
@class_view_decorator(never_cache)
@class_view_decorator(otp_required)
class PhoneSetupView(IdempotentSessionWizardView):
    """
    View for configuring a phone number for receiving tokens.

    A user can have multiple backup :class:`~two_factor.models.PhoneDevice`
    for receiving OTP tokens. If the primary phone number is not available, as
    the battery might have drained or the phone is lost, these backup phone
    numbers can be used for verification.
    """
    template_name = 'two_factor/core/phone_register.html'
    success_url = settings.LOGIN_REDIRECT_URL
    form_list = (
        ('setup', PhoneNumberMethodForm),
        ('validation', DeviceValidationForm),
    )
    # Name under which the generated device key is kept in extra_data.
    key_name = 'key'

    def get(self, request, *args, **kwargs):
        """
        Start the setup wizard. Redirect if no phone methods available.
        """
        if not get_available_phone_methods():
            return redirect(self.success_url)
        return super().get(request, *args, **kwargs)

    def done(self, form_list, **kwargs):
        """
        Store the device and redirect to profile page.
        """
        self.get_device(user=self.request.user, name='backup').save()
        return redirect(self.success_url)

    def render_next_step(self, form, **kwargs):
        """
        In the validation step, ask the device to generate a challenge.
        """
        next_step = self.steps.next
        if next_step == 'validation':
            self.get_device().generate_challenge()
        return super().render_next_step(form, **kwargs)

    def get_form_kwargs(self, step=None):
        """
        Provide the device to the DeviceValidationForm.
        """
        if step == 'validation':
            return {'device': self.get_device()}
        return {}

    def get_device(self, **kwargs):
        """
        Uses the data from the setup step and generated key to recreate device.
        """
        kwargs = kwargs or {}
        # Merge in the number/method captured by the 'setup' step.
        kwargs.update(self.storage.validated_step_data.get('setup', {}))
        return PhoneDevice(key=self.get_key(), **kwargs)

    def get_key(self):
        """
        The key is preserved between steps and stored as ascii in the session.
        """
        if self.key_name not in self.storage.extra_data:
            key = random_hex_str(20)
            self.storage.extra_data[self.key_name] = key
        return self.storage.extra_data[self.key_name]

    def get_context_data(self, form, **kwargs):
        kwargs.setdefault('cancel_url', resolve_url(self.success_url))
        return super().get_context_data(form, **kwargs)
@class_view_decorator(never_cache)
@class_view_decorator(otp_required)
class PhoneDeleteView(DeleteView):
    """
    View for removing a phone number used for verification.
    """
    success_url = settings.LOGIN_REDIRECT_URL

    def get_queryset(self):
        # Restrict deletion to the requesting user's own backup phones;
        # the primary ('default') device cannot be deleted here.
        return self.request.user.phonedevice_set.filter(name='backup')

    def get_success_url(self):
        return resolve_url(self.success_url)
@class_view_decorator(never_cache)
@class_view_decorator(otp_required)
class SetupCompleteView(TemplateView):
    """
    View congratulation the user when OTP setup has completed.
    """
    template_name = 'two_factor/core/setup_complete.html'

    def get_context_data(self):
        # Offered so the template can suggest adding backup phone numbers.
        return {
            'phone_methods': get_available_phone_methods(),
        }
@class_view_decorator(never_cache)
@class_view_decorator(login_required)
class QRGeneratorView(View):
    """
    View returns an SVG image with the OTP token information
    """
    http_method_names = ['get']
    default_qr_factory = 'qrcode.image.svg.SvgPathImage'
    # Must match the key SetupView uses to stash the base32 secret.
    session_key_name = 'django_two_factor-qr_secret_key'

    # The qrcode library only supports PNG and SVG for now
    image_content_types = {
        'PNG': 'image/png',
        'SVG': 'image/svg+xml; charset=utf-8',
    }

    def get_issuer(self):
        # Issuer shown in authenticator apps; defaults to the site name.
        return get_current_site(self.request).name

    def get(self, request, *args, **kwargs):
        # Get the data from the session
        try:
            key = self.request.session[self.session_key_name]
        except KeyError:
            # No secret in the session: QR code only exists during setup.
            raise Http404()

        # Get data for qrcode
        image_factory_string = getattr(settings, 'TWO_FACTOR_QR_FACTORY', self.default_qr_factory)
        image_factory = import_string(image_factory_string)
        content_type = self.image_content_types[image_factory.kind]
        try:
            username = self.request.user.get_username()
        except AttributeError:
            # Fall back for custom user models without get_username().
            username = self.request.user.username

        otpauth_url = get_otpauth_url(accountname=username,
                                      issuer=self.get_issuer(),
                                      secret=key,
                                      digits=totp_digits())

        # Make and return QR code
        img = qrcode.make(otpauth_url, image_factory=image_factory)
        resp = HttpResponse(content_type=content_type)
        img.save(resp)
        return resp
| ./CrossVul/dataset_final_sorted/CWE-312/py/good_4092_3 |
crossvul-python_data_good_4180_0 | ########################################################################
# File name: xhu.py
# This file is part of: xmpp-http-upload
#
# LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
import contextlib
import errno
import fnmatch
import json
import hashlib
import hmac
import pathlib
import typing
import flask
import werkzeug.exceptions
app = flask.Flask("xmpp-http-upload")
# Configuration file path comes from the environment.
app.config.from_envvar("XMPP_HTTP_UPLOAD_CONFIG")
application = app  # conventional WSGI entry-point alias

if app.config['ENABLE_CORS']:
    # flask_cors is only required when CORS support is enabled.
    from flask_cors import CORS
    CORS(app)
def get_paths(root: str, sub_path: str) \
        -> typing.Tuple[pathlib.Path, pathlib.Path]:
    """Map *sub_path* under *root* to its (data file, metadata file) pair.

    flask.safe_join rejects path-traversal attempts by raising NotFound.
    """
    joined = flask.safe_join(root, sub_path)
    return pathlib.Path(joined + ".data"), pathlib.Path(joined + ".meta")
def load_metadata(metadata_file):
    """Parse and return the JSON metadata stored at *metadata_file*."""
    with metadata_file.open("r") as stream:
        return json.load(stream)
def get_info(path: str) -> typing.Tuple[
        pathlib.Path,
        dict]:
    # Resolve *path* (relative to the configured DATA_ROOT) to its data
    # file and the parsed metadata dict stored alongside it.
    data_file, metadata_file = get_paths(app.config["DATA_ROOT"], path)
    return data_file, load_metadata(metadata_file)
@contextlib.contextmanager
def write_file(at: pathlib.Path):
    """Context manager opening *at* for exclusive binary writing.

    "xb" mode guarantees the file does not exist yet. If the body raises,
    the partially written file is removed before the exception propagates.
    """
    with at.open("xb") as sink:
        try:
            yield sink
        except:  # NOQA: bare on purpose -- cleanup must run for *any* error
            at.unlink()
            raise
@app.route("/")
def index():
return flask.Response(
"Welcome to XMPP HTTP Upload. State your business.",
mimetype="text/plain",
)
def stream_file(src, dest, nbytes):
    """Copy exactly *nbytes* bytes from *src* to *dest* in 4 KiB chunks.

    Raises EOFError when *src* runs dry before *nbytes* bytes were copied
    (whatever was read so far has already been written to *dest*).
    """
    remaining = nbytes
    while remaining > 0:
        chunk = src.read(min(remaining, 4096))
        if not chunk:
            # Source ended early: report the short read.
            raise EOFError
        dest.write(chunk)
        remaining -= len(chunk)
@app.route("/<path:path>", methods=["PUT"])
def put_file(path):
try:
data_file, metadata_file = get_paths(app.config["DATA_ROOT"], path)
except werkzeug.exceptions.NotFound:
return flask.Response(
"Not Found",
404,
mimetype="text/plain",
)
verification_key = flask.request.args.get("v", "")
length = int(flask.request.headers.get("Content-Length", 0))
hmac_input = "{} {}".format(path, length).encode("utf-8")
key = app.config["SECRET_KEY"]
mac = hmac.new(key, hmac_input, hashlib.sha256)
digest = mac.hexdigest()
if not hmac.compare_digest(digest, verification_key):
return flask.Response(
"Invalid verification key",
403,
mimetype="text/plain",
)
content_type = flask.request.headers.get(
"Content-Type",
"application/octet-stream",
)
data_file.parent.mkdir(parents=True, exist_ok=True, mode=0o770)
try:
with write_file(data_file) as fout:
stream_file(flask.request.stream, fout, length)
with metadata_file.open("x") as f:
json.dump(
{
"headers": {"Content-Type": content_type},
},
f,
)
except EOFError:
return flask.Response(
"Bad Request",
400,
mimetype="text/plain",
)
except OSError as exc:
if exc.errno == errno.EEXIST:
return flask.Response(
"Conflict",
409,
mimetype="text/plain",
)
raise
return flask.Response(
"Created",
201,
mimetype="text/plain",
)
def generate_headers(response_headers, metadata_headers):
    """Copy the stored upload headers onto *response_headers* and add
    download-hardening headers.

    Content types not whitelisted by NON_ATTACHMENT_MIME_TYPES are forced
    to download via Content-Disposition instead of rendering inline.
    """
    for name, value in metadata_headers.items():
        response_headers[name] = value

    content_type = metadata_headers["Content-Type"]
    allowed_globs = app.config.get("NON_ATTACHMENT_MIME_TYPES", [])
    if not any(fnmatch.fnmatch(content_type, pattern)
               for pattern in allowed_globs):
        response_headers["Content-Disposition"] = "attachment"

    response_headers["X-Content-Type-Options"] = "nosniff"
    response_headers["X-Frame-Options"] = "DENY"
    response_headers["Content-Security-Policy"] = "default-src 'none'; frame-ancestors 'none'; sandbox"
@app.route("/<path:path>", methods=["HEAD"])
def head_file(path):
try:
data_file, metadata = get_info(path)
stat = data_file.stat()
except (OSError, werkzeug.exceptions.NotFound):
return flask.Response(
"Not Found",
404,
mimetype="text/plain",
)
response = flask.Response()
response.headers["Content-Length"] = str(stat.st_size)
generate_headers(
response.headers,
metadata["headers"],
)
return response
@app.route("/<path:path>", methods=["GET"])
def get_file(path):
    """Serve a stored file, re-applying its persisted metadata headers."""
    try:
        data_file, metadata = get_info(path)
    except (OSError, werkzeug.exceptions.NotFound):
        return flask.Response("Not Found", 404, mimetype="text/plain")
    response = flask.make_response(flask.send_file(str(data_file)))
    generate_headers(response.headers, metadata["headers"])
    return response
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_4180_0 |
crossvul-python_data_good_495_0 | # -*- coding: utf-8 -*-
##############################################################################
# 2011 E2OpenPlugins #
# #
# This file is open source software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
##############################################################################
import os
import re
import glob
from urllib import quote
import json
from twisted.web import static, resource, http
from Components.config import config
from Tools.Directories import fileExists
from utilities import lenient_force_utf_8, sanitise_filename_slashes
def new_getRequestHostname(self):
    """Replacement for twisted's ``Request.getRequestHostname``.

    Uses the client-supplied Host header when present: an IPv6 literal
    such as ``[::1]:80`` is returned with its brackets intact, any
    ``:port`` suffix is stripped otherwise.  Falls back to the locally
    bound host when no Host header was sent.
    """
    host = self.getHeader(b'host')
    if not host:
        return self.getHost().host.encode('ascii')
    if host[0] == '[':
        # Bracketed IPv6 literal: keep everything up to (and incl.) ']'.
        return host.split(']', 1)[0] + "]"
    return host.split(':', 1)[0].encode('ascii')
http.Request.getRequestHostname = new_getRequestHostname
class FileController(resource.Resource):
    """Legacy OpenWebif ``/file`` handler.

    Depending on the query parameters this serves a single file
    (``file=`` with ``action`` download/stream/delete) or a JSON
    directory listing (``dir=`` with optional ``pattern=``).
    """
    def render(self, request):
        # Plain download is the default when no action is requested.
        action = "download"
        if "action" in request.args:
            action = request.args["action"][0]
        if "file" in request.args:
            # Normalise the requested name: force valid UTF-8, resolve
            # symlinks/relative parts via realpath, then strip slash tricks.
            filename = lenient_force_utf_8(request.args["file"][0])
            filename = sanitise_filename_slashes(os.path.realpath(filename))
            if not os.path.exists(filename):
                return "File '%s' not found" % (filename)
            if action == "stream":
                # Build a one-entry M3U playlist that points back at this
                # controller with action=download.
                name = "stream"
                if "name" in request.args:
                    name = request.args["name"][0]
                port = config.OpenWebif.port.value
                proto = 'http'
                if request.isSecure():
                    port = config.OpenWebif.https_port.value
                    proto = 'https'
                # Prefer the port the client actually connected to, if the
                # Host header carries one.
                ourhost = request.getHeader('host')
                m = re.match('.+\:(\d+)$', ourhost)
                if m is not None:
                    port = m.group(1)
                response = "#EXTM3U\n#EXTVLCOPT--http-reconnect=true\n#EXTINF:-1,%s\n%s://%s:%s/file?action=download&file=%s" % (name, proto, request.getRequestHostname(), port, quote(filename))
                request.setHeader("Content-Disposition", 'attachment;filename="%s.m3u"' % name)
                request.setHeader("Content-Type", "application/x-mpegurl")
                return response
            elif action == "delete":
                # Deletion is not implemented; acknowledge with a stub body.
                request.setResponseCode(http.OK)
                return "TODO: DELETE FILE: %s" % (filename)
            elif action == "download":
                # Force a download with the bare file name as attachment name.
                request.setHeader("Content-Disposition", "attachment;filename=\"%s\"" % (filename.split('/')[-1]))
                rfile = static.File(filename, defaultType = "application/octet-stream")
                return rfile.render(request)
            else:
                return "wrong action parameter"
        if "dir" in request.args:
            # Directory listing mode: glob the folder and split the matches
            # into dirs (suffixed with '/') and plain files.
            path = request.args["dir"][0]
            pattern = '*'
            data = []
            if "pattern" in request.args:
                pattern = request.args["pattern"][0]
            directories = []
            files = []
            if fileExists(path):
                try:
                    files = glob.glob(path+'/'+pattern)
                except:
                    files = []
                files.sort()
                # Iterate over a copy so entries can be moved out of `files`.
                tmpfiles = files[:]
                for x in tmpfiles:
                    if os.path.isdir(x):
                        directories.append(x + '/')
                        files.remove(x)
                data.append({"result": True,"dirs": directories,"files": files})
            else:
                # NOTE: "not exits" is the historic (misspelled) message text;
                # kept verbatim because clients may match on it.
                data.append({"result": False,"message": "path %s not exits" % (path)})
            request.setHeader("content-type", "application/json; charset=utf-8")
            return json.dumps(data, indent=2)
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_495_0 |
crossvul-python_data_good_115_1 | #!/usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# copyright 2006 Duke University
# author seth vidal
# sync all or the newest packages from a repo to the local path
# TODO:
# have it print out list of changes
# make it work with mirrorlists (silly, really)
# man page/more useful docs
# deal nicely with a package changing but not changing names (ie: replacement)
# criteria
# if a package is not the same and smaller then reget it
# if a package is not the same and larger, delete it and get it again
# always replace metadata files if they're not the same.
import os
import sys
import shutil
import stat
from optparse import OptionParser
from urlparse import urljoin
from yumutils.i18n import _
import yum
import yum.Errors
from yum.packageSack import ListPackageSack
import rpmUtils.arch
import logging
from urlgrabber.progress import TextMeter, TextMultiFileMeter
import urlgrabber
class RepoSync(yum.YumBase):
    """YumBase subclass that carries the parsed reposync CLI options."""

    def __init__(self, opts):
        yum.YumBase.__init__(self)
        self.opts = opts
        self.logger = logging.getLogger('yum.verbose.reposync')
def localpkgs(directory):
    """Recursively collect ``*.rpm`` files below *directory*.

    Returns a dict keyed by bare file name; each value holds the full
    path plus size and device number from ``lstat``.  Symlinks are never
    followed (neither into directories nor counted as packages).
    """
    found = {}
    for entry in os.listdir(directory):
        full_path = os.path.join(directory, entry)
        try:
            info = os.lstat(full_path)
        except os.error:
            # Entry vanished or is unreadable; just skip it.
            continue
        if stat.S_ISDIR(info.st_mode):
            found.update(localpkgs(full_path))
        elif stat.S_ISREG(info.st_mode) and entry.endswith(".rpm"):
            found[entry] = {'path': full_path,
                            'size': info.st_size,
                            'device': info.st_dev}
    return found
def is_subpath(path, root):
    """Return True if *path*, resolved relative to *root*, stays inside *root*.

    Both sides go through ``realpath`` so ``..`` components and symlinks
    cannot be used to escape the root directory.
    """
    real_root = os.path.realpath(root)
    candidate = os.path.realpath(os.path.join(real_root, path))
    # join(real_root, '') guarantees a trailing separator, so a sibling
    # like "/repo-evil" does not pass a naive prefix test for "/repo".
    return candidate.startswith(os.path.join(real_root, ''))
def parseArgs():
    """Build the reposync option parser and parse ``sys.argv``.

    Returns:
        ``(opts, args)`` tuple as produced by ``OptionParser.parse_args()``.
    """
    usage = _("""
    Reposync is used to synchronize a remote yum repository to a local
    directory using yum to retrieve the packages.

    %s [options]
    """) % sys.argv[0]

    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--config", default='/etc/yum.conf',
        help=_('config file to use (defaults to /etc/yum.conf)'))
    parser.add_option("-a", "--arch", default=None,
        help=_('act as if running the specified arch (default: current arch, note: does not override $releasever. x86_64 is a superset for i*86.)'))
    parser.add_option("--source", default=False, dest="source", action="store_true",
        help=_('operate on source packages'))
    parser.add_option("-r", "--repoid", default=[], action='append',
        help=_("specify repo ids to query, can be specified multiple times (default is all enabled)"))
    parser.add_option("-e", "--cachedir",
        help=_("directory in which to store metadata"))
    parser.add_option("-t", "--tempcache", default=False, action="store_true",
        help=_("Use a temp dir for storing/accessing yum-cache"))
    parser.add_option("-d", "--delete", default=False, action="store_true",
        help=_("delete local packages no longer present in repository"))
    parser.add_option("-p", "--download_path", dest='destdir',
        default=os.getcwd(), help=_("Path to download packages to: defaults to current dir"))
    parser.add_option("--norepopath", dest='norepopath', default=False, action="store_true",
        help=_("Don't add the reponame to the download path. Can only be used when syncing a single repository (default is to add the reponame)"))
    parser.add_option("-g", "--gpgcheck", default=False, action="store_true",
        help=_("Remove packages that fail GPG signature checking after downloading"))
    parser.add_option("-u", "--urls", default=False, action="store_true",
        help=_("Just list urls of what would be downloaded, don't download"))
    parser.add_option("-n", "--newest-only", dest='newest', default=False, action="store_true",
        help=_("Download only newest packages per-repo"))
    parser.add_option("-q", "--quiet", default=False, action="store_true",
        help=_("Output as little as possible"))
    parser.add_option("-l", "--plugins", default=False, action="store_true",
        help=_("enable yum plugin support"))
    parser.add_option("-m", "--downloadcomps", default=False, action="store_true",
        help=_("also download comps.xml"))
    parser.add_option("", "--download-metadata", dest="downloadmd",
        default=False, action="store_true",
        help=_("download all the non-default metadata"))
    # Escape hatch for the RHBZ#1600221 path-traversal protection applied
    # in main(); off by default because it is unsafe.
    parser.add_option("", "--allow-path-traversal", default=False,
        action="store_true",
        help=_("Allow packages stored outside their repo directory to be synced "
               "(UNSAFE, USE WITH CAUTION!)"))
    (opts, args) = parser.parse_args()
    return (opts, args)
def main():
    """Command-line entry point: sync each enabled repo to a local dir.

    Exits with 0 on success, 1 on download/signature problems or setup
    errors, 50 on cache/lock failures.
    """
    (opts, dummy) = parseArgs()

    # Make sure the destination exists and is writable (unless we are only
    # printing URLs).
    if not os.path.exists(opts.destdir) and not opts.urls:
        try:
            os.makedirs(opts.destdir)
        except OSError, e:
            print >> sys.stderr, _("Error: Cannot create destination dir %s") % opts.destdir
            sys.exit(1)

    if not os.access(opts.destdir, os.W_OK) and not opts.urls:
        print >> sys.stderr, _("Error: Cannot write to destination dir %s") % opts.destdir
        sys.exit(1)

    my = RepoSync(opts=opts)
    my.doConfigSetup(fn=opts.config, init_plugins=opts.plugins)

    # Force unprivileged users to have a private temporary cachedir
    # if they've not given an explicit cachedir
    if os.getuid() != 0 and not opts.cachedir:
        opts.tempcache = True

    if opts.tempcache:
        if not my.setCacheDir(force=True, reuse=False):
            print >> sys.stderr, _("Error: Could not make cachedir, exiting")
            sys.exit(50)
        my.conf.uid = 1 # force locking of user cache
    elif opts.cachedir:
        my.repos.setCacheDir(opts.cachedir)

    # Lock if they've not given an explicit cachedir
    if not opts.cachedir:
        try:
            my.doLock()
        except yum.Errors.LockError, e:
            print >> sys.stderr, _("Error: %s") % e
            sys.exit(50)

    # Use progress bar display when downloading repo metadata
    # and package files ... needs to be setup before .repos (ie. RHN/etc.).
    if not opts.quiet:
        my.repos.setProgressBar(TextMeter(fo=sys.stdout), TextMultiFileMeter(fo=sys.stdout))
    my.doRepoSetup()

    # -r/--repoid: restrict the run to explicitly selected repositories.
    if len(opts.repoid) > 0:
        myrepos = []

        # find the ones we want
        for glob in opts.repoid:
            add_repos = my.repos.findRepos(glob)
            if not add_repos:
                print >> sys.stderr, _("Warning: cannot find repository %s") % glob
                continue
            myrepos.extend(add_repos)

        if not myrepos:
            print >> sys.stderr, _("No repositories found")
            sys.exit(1)

        # disable them all
        for repo in my.repos.repos.values():
            repo.disable()

        # enable the ones we like
        for repo in myrepos:
            repo.enable()

    # --norepopath can only be sensibly used with a single repository:
    if len(my.repos.listEnabled()) > 1 and opts.norepopath:
        print >> sys.stderr, _("Error: Can't use --norepopath with multiple repositories")
        sys.exit(1)

    try:
        arches = rpmUtils.arch.getArchList(opts.arch)
        if opts.source:
            arches += ['src']
        my.doSackSetup(arches)
    except yum.Errors.RepoError, e:
        print >> sys.stderr, _("Error setting up repositories: %s") % e
        # maybe this shouldn't be entirely fatal
        sys.exit(1)

    exit_code = 0
    for repo in my.repos.listEnabled():
        reposack = ListPackageSack(my.pkgSack.returnPackages(repoid=repo.id))

        if opts.newest:
            download_list = reposack.returnNewestByNameArch()
        else:
            download_list = list(reposack)

        if opts.norepopath:
            local_repo_path = opts.destdir
        else:
            local_repo_path = opts.destdir + '/' + repo.id

        # Ensure we don't traverse out of local_repo_path by dropping any
        # packages whose remote_path is absolute or contains up-level
        # references (unless explicitly allowed).
        # See RHBZ#1600221 for details.
        if not opts.allow_path_traversal:
            newlist = []
            skipped = False
            for pkg in download_list:
                if is_subpath(pkg.remote_path, local_repo_path):
                    newlist.append(pkg)
                    continue
                my.logger.warning(
                    _('WARNING: skipping package %s: remote path "%s" not '
                      'within repodir, unsafe to mirror locally')
                    % (pkg, pkg.remote_path)
                )
                skipped = True
            if skipped:
                my.logger.info(
                    _('You can enable unsafe remote paths by using '
                      '--allow-path-traversal (see reposync(1) for details)')
                )
            download_list = newlist

        # -d/--delete: prune local packages the repo no longer carries.
        if opts.delete and os.path.exists(local_repo_path):
            current_pkgs = localpkgs(local_repo_path)

            download_set = {}
            for pkg in download_list:
                rpmname = os.path.basename(pkg.remote_path)
                download_set[rpmname] = 1

            for pkg in current_pkgs:
                if pkg in download_set:
                    continue
                if not opts.quiet:
                    my.logger.info("Removing obsolete %s", pkg)
                os.unlink(current_pkgs[pkg]['path'])

        # Optionally mirror comps.xml and/or all non-default metadata files.
        if opts.downloadcomps or opts.downloadmd:
            if not os.path.exists(local_repo_path):
                try:
                    os.makedirs(local_repo_path)
                except IOError, e:
                    my.logger.error("Could not make repo subdir: %s" % e)
                    my.closeRpmDB()
                    sys.exit(1)

            if opts.downloadcomps:
                wanted_types = ['group']

            if opts.downloadmd:
                wanted_types = repo.repoXML.fileTypes()

            for ftype in repo.repoXML.fileTypes():
                # The primary/filelists/other metadata are always handled by
                # yum itself; only the extra types are copied here.
                if ftype in ['primary', 'primary_db', 'filelists',
                             'filelists_db', 'other', 'other_db']:
                    continue
                if ftype not in wanted_types:
                    continue
                try:
                    resultfile = repo.retrieveMD(ftype)
                    basename = os.path.basename(resultfile)
                    if ftype == 'group' and opts.downloadcomps: # for compat with how --downloadcomps saved the comps file always as comps.xml
                        basename = 'comps.xml'
                    shutil.copyfile(resultfile, "%s/%s" % (local_repo_path, basename))
                except yum.Errors.RepoMDError, e:
                    if not opts.quiet:
                        my.logger.error("Unable to fetch metadata: %s" % e)

        # Pre-compute the total download size (skipping already-complete
        # files) so the progress meter can show an accurate total.
        remote_size = 0
        if not opts.urls:
            for pkg in download_list:
                local = os.path.join(local_repo_path, pkg.remote_path)
                sz = int(pkg.returnSimple('packagesize'))
                if os.path.exists(local) and os.path.getsize(local) == sz:
                    continue
                remote_size += sz
            if hasattr(urlgrabber.progress, 'text_meter_total_size'):
                urlgrabber.progress.text_meter_total_size(remote_size)

        download_list.sort(key=lambda pkg: pkg.name)
        if opts.urls:
            # -u/--urls: only print what would be fetched, then move on to
            # the next repository.
            for pkg in download_list:
                local = os.path.join(local_repo_path, pkg.remote_path)
                if not (os.path.exists(local) and my.verifyPkg(local, pkg, False)):
                    print urljoin(pkg.repo.urls[0], pkg.remote_path)
            continue

        # create dest dir
        if not os.path.exists(local_repo_path):
            os.makedirs(local_repo_path)

        # set localpaths
        for pkg in download_list:
            pkg.localpath = os.path.join(local_repo_path, pkg.remote_path)
            pkg.repo.copy_local = True
            pkg.repo.cache = 0
            localdir = os.path.dirname(pkg.localpath)
            if not os.path.exists(localdir):
                os.makedirs(localdir)

        # use downloader from YumBase
        probs = my.downloadPkgs(download_list)
        if probs:
            exit_code = 1
            for key in probs:
                for error in probs[key]:
                    my.logger.error('%s: %s', key, error)

        # -g/--gpgcheck: drop packages that fail signature verification.
        if opts.gpgcheck:
            for pkg in download_list:
                result, error = my.sigCheckPkg(pkg)
                if result != 0:
                    rpmfn = os.path.basename(pkg.remote_path)
                    if result == 1:
                        my.logger.warning('Removing %s, due to missing GPG key.' % rpmfn)
                    elif result == 2:
                        my.logger.warning('Removing %s due to failed signature check.' % rpmfn)
                    else:
                        my.logger.warning('Removing %s due to failed signature check: %s' % rpmfn)
                    os.unlink(pkg.localpath)
                    exit_code = 1
                    continue

    my.closeRpmDB()
    sys.exit(exit_code)
# Script entry point.
if __name__ == "__main__":
    main()
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_115_1 |
crossvul-python_data_bad_495_1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RESTful Filesystem access using HTTP
------------------------------------
This controller and helper classes exposes parts or all of the server's
filesystem. Means to retrieve and delete files are provided as well as the
ability to list folder contents.
The generated responses are returned as JSON data with appropriate HTTP headers.
Output will be compressed using gzip most of the times.
Example calls using curl
++++++++++++++++++++++++
The following examples assume that the FileController instance is accessible
as '/file' on 'localhost', port 18888 (http://localhost:18888/file).
Fetch list of files and folders in root folder:
curl --noproxy localhost -iv http://localhost:18888/file
Fetch example file 'example.txt'
curl --noproxy localhost -iv http://localhost:18888/file/example.txt
Fetch gzipped example file 'example.txt'
curl --compressed -H "Accept-Encoding: gzip" --noproxy localhost -iv http://localhost:18888/file/example.txt
Delete example file 'example.txt'
curl --noproxy localhost -iv -X DELETE http://localhost:18888/file/example.txt
"""
import os
import json
import glob
import re
import urlparse
import twisted.web.static
from twisted.web import http
import file
#: regex collapsing runs of consecutive slashes in request paths
MANY_SLASHES_PATTERN = r'[\/]+'
MANY_SLASHES_REGEX = re.compile(MANY_SLASHES_PATTERN)

#: default path from which files will be served
DEFAULT_ROOT_PATH = os.path.abspath(os.path.dirname(__file__))

#: CORS - HTTP headers the client may use
CORS_ALLOWED_CLIENT_HEADERS = [
    'Content-Type',
]

#: CORS - HTTP methods the client may use
CORS_ALLOWED_METHODS_DEFAULT = ['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS']

#: CORS - default origin header value
CORS_DEFAULT_ALLOW_ORIGIN = '*'

#: CORS - HTTP headers the server will send as part of OPTIONS response
CORS_DEFAULT = {
    'Access-Control-Allow-Origin': CORS_DEFAULT_ALLOW_ORIGIN,
    'Access-Control-Allow-Credentials': 'true',
    'Access-Control-Max-Age': '86400',
    'Access-Control-Allow-Methods': ','.join(CORS_ALLOWED_METHODS_DEFAULT),
    'Access-Control-Allow-Headers': ', '.join(CORS_ALLOWED_CLIENT_HEADERS)
}

#: paths where file delete operations shall be allowed
DELETE_WHITELIST = [
    '/media',
]
class FileController(twisted.web.resource.Resource):
    """RESTful filesystem access rooted at a configurable folder.

    GET returns file contents or JSON folder listings, DELETE removes
    whitelisted files.  All paths derived from the request are confined
    to ``_root`` (see :py:meth:`_existing_path_or_bust`).
    """
    isLeaf = True
    _override_args = (
        'resource_prefix', 'root', 'do_delete', 'delete_whitelist')
    _resource_prefix = '/file'
    _root = os.path.abspath(os.path.dirname(__file__))
    _do_delete = False
    _delete_whitelist = DELETE_WHITELIST
    #: file extensions which must never be served gzip-compressed
    never_gzip_extensions = ('.ts',)

    def __init__(self, *args, **kwargs):
        """
        Default Constructor.

        Args:
            resource_prefix: Prefix value for this controller instance.
                Default is :py:data:`FileController._resource_prefix`
            root: Root path of files to be served.
                Default is the path where the current file is located
            do_delete: Try to actually delete files?
                Default is False.
            delete_whitelist: Folder prefixes where delete operations are
                allowed _at all_. Default is :py:data:`DELETE_WHITELIST`
        """
        # Positional arguments map onto _override_args in order.
        if args:
            for key, value in zip(self._override_args, args):
                kwargs[key] = value

        for arg_name in self._override_args:
            if kwargs.get(arg_name) is not None:
                attr_name = '_{:s}'.format(arg_name)
                setattr(self, attr_name, kwargs.get(arg_name))

        self.session = kwargs.get("session")

    def _json_response(self, request, data):
        """
        Create a JSON representation for *data* and set HTTP headers indicating
        that JSON encoded data is returned.

        Args:
            request (twisted.web.server.Request): HTTP request object
            data: response content

        Returns:
            JSON representation of *data* with appropriate HTTP headers
        """
        request.setHeader("content-type", "application/json; charset=utf-8")
        return json.dumps(data, indent=2)

    def get_response_data_template(self, request):
        """
        Generate a response data :class:`dict` containing default values and
        some request attribute values for debugging purposes.

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            (dict) response template data
        """
        file_path = None
        if request.path.startswith(self._resource_prefix):
            file_path = request.path[len(self._resource_prefix):]

        response_data = {
            "_request": {
                "path": request.path,
                "uri": request.uri,
                "method": request.method,
                "postpath": request.postpath,
                "file_path": file_path,
            },
            "result": False,
        }
        return response_data

    def error_response(self, request, response_code=None, **kwargs):
        """
        Create and return an HTTP error response with data as JSON.

        Args:
            request (twisted.web.server.Request): HTTP request object
            response_code: HTTP Status Code (default is 500)
            **kwargs: additional key/value pairs

        Returns:
            JSON encoded data with appropriate HTTP headers
        """
        if response_code is None:
            response_code = http.INTERNAL_SERVER_ERROR

        response_data = self.get_response_data_template(request)
        response_data.update(**kwargs)
        # Expose the effective configuration for easier debugging.
        response_data['me'] = dict()
        for arg_name in self._override_args:
            attr_name = '_{:s}'.format(arg_name)
            response_data['me'][attr_name] = getattr(self, attr_name)

        request.setResponseCode(response_code)
        return self._json_response(request, response_data)

    def _existing_path_or_bust(self, request):
        """
        Verify that a filesystem location which is contained in *request.path*
        is valid, existing and confined to the configured root folder.

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            path

        Raises:
            ValueError: If contained path value is invalid or escapes root.
            IOError: If contained path value is not existing.
        """
        rq_path = urlparse.unquote(request.path)

        if not rq_path.startswith(self._resource_prefix):
            raise ValueError("Invalid Request Path {!r}".format(request.path))

        file_path = os.path.join(
            self._root, rq_path[len(self._resource_prefix) + 1:])
        file_path = re.sub(MANY_SLASHES_REGEX, '/', file_path)

        # SECURITY FIX (CWE-22): resolve '..' and symlink components and
        # refuse anything that would escape the configured root folder.
        # Previously the unquoted request path was joined verbatim, so
        # e.g. "/file/../../etc/passwd" could read arbitrary files.
        file_path = os.path.realpath(file_path)
        real_root = os.path.realpath(self._root)
        if file_path != real_root and not file_path.startswith(
                os.path.join(real_root, '')):
            raise ValueError("Invalid Request Path {!r}".format(request.path))

        if not os.path.exists(file_path):
            raise IOError("Not Found {!r}".format(file_path))

        return file_path

    def render_OPTIONS(self, request):
        """
        Render response for an HTTP OPTIONS request.

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        for key in CORS_DEFAULT:
            request.setHeader(key, CORS_DEFAULT[key])

        return ''

    def render_legacy(self, request):
        """
        Render response for an HTTP GET request. In order to maintain
        backward compatibility this method emulates the behaviour of the
        legacy method implementation.

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        return file.FileController().render(request)

    def _glob(self, path, pattern='*'):
        # Yield entries of *path* matching *pattern* (non-recursive).
        if path == '/':
            glob_me = '/' + pattern
        else:
            glob_me = '/'.join((path, pattern))
        return glob.iglob(glob_me)

    def _walk(self, path):
        # Yield every directory and file below *path* (recursive).
        for root, dirs, files in os.walk(path):
            for dir_item in dirs:
                yield os.path.join(root, dir_item)
            for file_item in files:
                yield os.path.join(root, file_item)

    def render_path_listing(self, request, path):
        """
        Generate a file/folder listing of *path*'s contents.

        Args:
            request (twisted.web.server.Request): HTTP request object
            path: folder location

        Returns:
            HTTP response with headers
        """
        response_data = self.get_response_data_template(request)
        response_data.update(
            {
                'result': True,
                'dirs': [],
                'files': [],
            }
        )

        generator = None
        if "pattern" in request.args:
            generator = self._glob(path, request.args["pattern"][0])

        if "recursive" in request.args:
            generator = self._walk(path)

        if generator is None:
            generator = self._glob(path)

        for item in generator:
            if os.path.isdir(item):
                response_data['dirs'].append(item)
            else:
                response_data['files'].append(item)

        return self._json_response(request, response_data)

    def render_file(self, request, path):
        """
        Return the contents of file *path*.

        Args:
            request (twisted.web.server.Request): HTTP request object
            path: file path

        Returns:
            HTTP response with headers
        """
        (_, ext) = os.path.splitext(path)
        if ext in self.never_gzip_extensions:
            # hack: remove gzip from the list of supported encodings
            acceptHeaders = request.requestHeaders.getRawHeaders(
                'accept-encoding', [])
            supported = ','.join(acceptHeaders).split(',')
            request.requestHeaders.setRawHeaders(
                'accept-encoding', list(set(supported) - {'gzip'}))

        result = twisted.web.static.File(
            path, defaultType="application/octet-stream")
        return result.render(request)

    def render_GET(self, request):
        """
        HTTP GET request handler returning

            * legacy response if the query *file* or *dir* parameter is set
            * file contents if *request.path* contains a file path
            * directory listing if *request.path* contains a folder path

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        attic_args = {'file', 'dir'}
        if len(attic_args & set(request.args.keys())) >= 1:
            return self.render_legacy(request)

        request.setHeader(
            'Access-Control-Allow-Origin', CORS_DEFAULT_ALLOW_ORIGIN)

        try:
            target_path = self._existing_path_or_bust(request)
        except ValueError as vexc:
            return self.error_response(
                request, response_code=http.BAD_REQUEST, message=vexc.message)
        except IOError as iexc:
            return self.error_response(
                request, response_code=http.NOT_FOUND, message=iexc.message)

        if os.path.isdir(target_path):
            return self.render_path_listing(request, target_path)
        else:
            return self.render_file(request, target_path)

    def render_POST(self, request):
        """
        HTTP POST request handler (currently NOT implemented).

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        request.setHeader(
            'Access-Control-Allow-Origin', CORS_DEFAULT_ALLOW_ORIGIN)
        return self.error_response(request, response_code=http.NOT_IMPLEMENTED)

    def render_PUT(self, request):
        """
        HTTP PUT request handler (currently NOT implemented).

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        request.setHeader(
            'Access-Control-Allow-Origin', CORS_DEFAULT_ALLOW_ORIGIN)
        return self.error_response(request, response_code=http.NOT_IMPLEMENTED)

    def render_DELETE(self, request):
        """
        HTTP DELETE request handler which may try to delete a file if its
        path's prefix is in :py:data:`FileController._delete_whitelist` and
        :py:data:`FileController._do_delete` is True.

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        request.setHeader(
            'Access-Control-Allow-Origin', CORS_DEFAULT_ALLOW_ORIGIN)
        try:
            target_path = self._existing_path_or_bust(request)
        except ValueError as vexc:
            return self.error_response(
                request, response_code=http.BAD_REQUEST, message=vexc.message)
        except IOError as iexc:
            return self.error_response(
                request, response_code=http.NOT_FOUND, message=iexc.message)

        if os.path.isdir(target_path):
            return self.error_response(
                request, response_code=http.NOT_IMPLEMENTED,
                message='Will not remove folder {!r}'.format(target_path))

        # BUGFIX: the path only needs to match *one* whitelisted prefix.
        # The previous loop returned FORBIDDEN unless the path matched
        # every entry, which made deletion impossible with more than one
        # prefix configured.  An empty whitelist keeps its historic
        # "allow everything" meaning (used by the demo in __main__).
        if self._delete_whitelist and not any(
                target_path.startswith(os.path.abspath(prefix))
                for prefix in self._delete_whitelist):
            return self.error_response(request, response_code=http.FORBIDDEN)

        response_data = self.get_response_data_template(request)
        try:
            response_data['result'] = True
            if self._do_delete:
                os.unlink(target_path)
                message = 'Removed {!r}'.format(target_path)
            else:
                message = 'WOULD remove {!r}'.format(target_path)
            response_data['message'] = message
        except Exception as eexc:
            response_data['message'] = 'Cannot remove {!r}: {!s}'.format(
                target_path, eexc.message)
            request.setResponseCode(http.INTERNAL_SERVER_ERROR)

        return self._json_response(request, response_data)
# Stand-alone demo server (not used when imported as a module).
if __name__ == '__main__':
    from twisted.web.resource import Resource, EncodingResourceWrapper
    from twisted.web.server import Site, GzipEncoderFactory
    from twisted.internet import reactor

    # standard factory example
    factory_s = Site(FileController(DEFAULT_ROOT_PATH))

    # experimental factory
    root = Resource()
    # NOTE(review): putChild() is handed the FileController *class* here,
    # not an instance, and "/" is not a valid child segment name -- this
    # experimental factory looks broken; only factory_s_gz is served below.
    root.putChild("/", FileController)
    root.putChild("/file", FileController)
    factory_r = Site(root)

    # experimental factory: enable gzip compression
    wrapped = EncodingResourceWrapper(
        FileController(
            root=DEFAULT_ROOT_PATH,
            # DANGER, WILL ROBINSON! These values allow deletion of ALL files!
            do_delete=True, delete_whitelist=[]
        ),
        [GzipEncoderFactory()])
    factory_s_gz = Site(wrapped)

    reactor.listenTCP(18888, factory_s_gz)
    reactor.run()
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_495_1 |
crossvul-python_data_bad_116_0 | #!/usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# copyright 2006 Duke University
# author seth vidal
# sync all or the newest packages from a repo to the local path
# TODO:
# have it print out list of changes
# make it work with mirrorlists (silly, really)
# man page/more useful docs
# deal nicely with a package changing but not changing names (ie: replacement)
# criteria
# if a package is not the same and smaller then reget it
# if a package is not the same and larger, delete it and get it again
# always replace metadata files if they're not the same.
import os
import sys
import shutil
import stat
from optparse import OptionParser
from urlparse import urljoin
from yumutils.i18n import _
import yum
import yum.Errors
from yum.packageSack import ListPackageSack
import rpmUtils.arch
import logging
from urlgrabber.progress import TextMeter, TextMultiFileMeter
import urlgrabber
class RepoSync(yum.YumBase):
    """Thin YumBase wrapper carrying the parsed reposync CLI options."""
    def __init__(self, opts):
        yum.YumBase.__init__(self)
        self.logger = logging.getLogger('yum.verbose.reposync')
        self.opts = opts
def localpkgs(directory):
    """Walk *directory* recursively and index every ``*.rpm`` file.

    The result maps the bare file name to a dict with the full 'path',
    the 'size' and the 'device' number taken from ``lstat``.  Symlinks
    are not followed and never counted as packages.
    """
    packages = {}
    for name in os.listdir(directory):
        entry_path = os.path.join(directory, name)
        try:
            entry_stat = os.lstat(entry_path)
        except os.error:
            # Raced removal / permission problem: ignore this entry.
            continue
        if stat.S_ISDIR(entry_stat.st_mode):
            packages.update(localpkgs(entry_path))
        elif stat.S_ISREG(entry_stat.st_mode) and name.endswith(".rpm"):
            packages[name] = {'path': entry_path,
                              'size': entry_stat.st_size,
                              'device': entry_stat.st_dev}
    return packages
def parseArgs():
    """Build the reposync option parser and parse ``sys.argv``.

    Returns:
        ``(opts, args)`` tuple as produced by ``OptionParser.parse_args()``.
    """
    usage = _("""
    Reposync is used to synchronize a remote yum repository to a local
    directory using yum to retrieve the packages.

    %s [options]
    """) % sys.argv[0]

    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--config", default='/etc/yum.conf',
        help=_('config file to use (defaults to /etc/yum.conf)'))
    parser.add_option("-a", "--arch", default=None,
        help=_('act as if running the specified arch (default: current arch, note: does not override $releasever. x86_64 is a superset for i*86.)'))
    parser.add_option("--source", default=False, dest="source", action="store_true",
        help=_('operate on source packages'))
    parser.add_option("-r", "--repoid", default=[], action='append',
        help=_("specify repo ids to query, can be specified multiple times (default is all enabled)"))
    parser.add_option("-e", "--cachedir",
        help=_("directory in which to store metadata"))
    parser.add_option("-t", "--tempcache", default=False, action="store_true",
        help=_("Use a temp dir for storing/accessing yum-cache"))
    parser.add_option("-d", "--delete", default=False, action="store_true",
        help=_("delete local packages no longer present in repository"))
    parser.add_option("-p", "--download_path", dest='destdir',
        default=os.getcwd(), help=_("Path to download packages to: defaults to current dir"))
    parser.add_option("--norepopath", dest='norepopath', default=False, action="store_true",
        help=_("Don't add the reponame to the download path. Can only be used when syncing a single repository (default is to add the reponame)"))
    parser.add_option("-g", "--gpgcheck", default=False, action="store_true",
        help=_("Remove packages that fail GPG signature checking after downloading"))
    parser.add_option("-u", "--urls", default=False, action="store_true",
        help=_("Just list urls of what would be downloaded, don't download"))
    parser.add_option("-n", "--newest-only", dest='newest', default=False, action="store_true",
        help=_("Download only newest packages per-repo"))
    parser.add_option("-q", "--quiet", default=False, action="store_true",
        help=_("Output as little as possible"))
    parser.add_option("-l", "--plugins", default=False, action="store_true",
        help=_("enable yum plugin support"))
    parser.add_option("-m", "--downloadcomps", default=False, action="store_true",
        help=_("also download comps.xml"))
    parser.add_option("", "--download-metadata", dest="downloadmd",
        default=False, action="store_true",
        help=_("download all the non-default metadata"))
    (opts, args) = parser.parse_args()
    return (opts, args)
def main():
(opts, dummy) = parseArgs()
if not os.path.exists(opts.destdir) and not opts.urls:
try:
os.makedirs(opts.destdir)
except OSError, e:
print >> sys.stderr, _("Error: Cannot create destination dir %s") % opts.destdir
sys.exit(1)
if not os.access(opts.destdir, os.W_OK) and not opts.urls:
print >> sys.stderr, _("Error: Cannot write to destination dir %s") % opts.destdir
sys.exit(1)
my = RepoSync(opts=opts)
my.doConfigSetup(fn=opts.config, init_plugins=opts.plugins)
# Force unprivileged users to have a private temporary cachedir
# if they've not given an explicit cachedir
if os.getuid() != 0 and not opts.cachedir:
opts.tempcache = True
if opts.tempcache:
if not my.setCacheDir(force=True, reuse=False):
print >> sys.stderr, _("Error: Could not make cachedir, exiting")
sys.exit(50)
my.conf.uid = 1 # force locking of user cache
elif opts.cachedir:
my.repos.setCacheDir(opts.cachedir)
# Lock if they've not given an explicit cachedir
if not opts.cachedir:
try:
my.doLock()
except yum.Errors.LockError, e:
print >> sys.stderr, _("Error: %s") % e
sys.exit(50)
# Use progress bar display when downloading repo metadata
# and package files ... needs to be setup before .repos (ie. RHN/etc.).
if not opts.quiet:
my.repos.setProgressBar(TextMeter(fo=sys.stdout), TextMultiFileMeter(fo=sys.stdout))
my.doRepoSetup()
if len(opts.repoid) > 0:
myrepos = []
# find the ones we want
for glob in opts.repoid:
add_repos = my.repos.findRepos(glob)
if not add_repos:
print >> sys.stderr, _("Warning: cannot find repository %s") % glob
continue
myrepos.extend(add_repos)
if not myrepos:
print >> sys.stderr, _("No repositories found")
sys.exit(1)
# disable them all
for repo in my.repos.repos.values():
repo.disable()
# enable the ones we like
for repo in myrepos:
repo.enable()
# --norepopath can only be sensibly used with a single repository:
if len(my.repos.listEnabled()) > 1 and opts.norepopath:
print >> sys.stderr, _("Error: Can't use --norepopath with multiple repositories")
sys.exit(1)
try:
arches = rpmUtils.arch.getArchList(opts.arch)
if opts.source:
arches += ['src']
my.doSackSetup(arches)
except yum.Errors.RepoError, e:
print >> sys.stderr, _("Error setting up repositories: %s") % e
# maybe this shouldn't be entirely fatal
sys.exit(1)
exit_code = 0
for repo in my.repos.listEnabled():
reposack = ListPackageSack(my.pkgSack.returnPackages(repoid=repo.id))
if opts.newest:
download_list = reposack.returnNewestByNameArch()
else:
download_list = list(reposack)
if opts.norepopath:
local_repo_path = opts.destdir
else:
local_repo_path = opts.destdir + '/' + repo.id
if opts.delete and os.path.exists(local_repo_path):
current_pkgs = localpkgs(local_repo_path)
download_set = {}
for pkg in download_list:
remote = pkg.returnSimple('relativepath')
rpmname = os.path.basename(remote)
download_set[rpmname] = 1
for pkg in current_pkgs:
if pkg in download_set:
continue
if not opts.quiet:
my.logger.info("Removing obsolete %s", pkg)
os.unlink(current_pkgs[pkg]['path'])
if opts.downloadcomps or opts.downloadmd:
if not os.path.exists(local_repo_path):
try:
os.makedirs(local_repo_path)
except IOError, e:
my.logger.error("Could not make repo subdir: %s" % e)
my.closeRpmDB()
sys.exit(1)
if opts.downloadcomps:
wanted_types = ['group']
if opts.downloadmd:
wanted_types = repo.repoXML.fileTypes()
for ftype in repo.repoXML.fileTypes():
if ftype in ['primary', 'primary_db', 'filelists',
'filelists_db', 'other', 'other_db']:
continue
if ftype not in wanted_types:
continue
try:
resultfile = repo.retrieveMD(ftype)
basename = os.path.basename(resultfile)
if ftype == 'group' and opts.downloadcomps: # for compat with how --downloadcomps saved the comps file always as comps.xml
basename = 'comps.xml'
shutil.copyfile(resultfile, "%s/%s" % (local_repo_path, basename))
except yum.Errors.RepoMDError, e:
if not opts.quiet:
my.logger.error("Unable to fetch metadata: %s" % e)
remote_size = 0
if not opts.urls:
for pkg in download_list:
remote = pkg.returnSimple('relativepath')
local = local_repo_path + '/' + remote
sz = int(pkg.returnSimple('packagesize'))
if os.path.exists(local) and os.path.getsize(local) == sz:
continue
remote_size += sz
if hasattr(urlgrabber.progress, 'text_meter_total_size'):
urlgrabber.progress.text_meter_total_size(remote_size)
download_list.sort(key=lambda pkg: pkg.name)
if opts.urls:
for pkg in download_list:
remote = pkg.returnSimple('relativepath')
local = os.path.join(local_repo_path, remote)
if not (os.path.exists(local) and my.verifyPkg(local, pkg, False)):
print urljoin(pkg.repo.urls[0], pkg.relativepath)
continue
# create dest dir
if not os.path.exists(local_repo_path):
os.makedirs(local_repo_path)
# set localpaths
for pkg in download_list:
rpmfn = pkg.remote_path
pkg.localpath = os.path.join(local_repo_path, rpmfn)
pkg.repo.copy_local = True
pkg.repo.cache = 0
localdir = os.path.dirname(pkg.localpath)
if not os.path.exists(localdir):
os.makedirs(localdir)
# use downloader from YumBase
probs = my.downloadPkgs(download_list)
if probs:
exit_code = 1
for key in probs:
for error in probs[key]:
my.logger.error('%s: %s', key, error)
if opts.gpgcheck:
for pkg in download_list:
result, error = my.sigCheckPkg(pkg)
if result != 0:
rpmfn = os.path.basename(pkg.remote_path)
if result == 1:
my.logger.warning('Removing %s, due to missing GPG key.' % rpmfn)
elif result == 2:
my.logger.warning('Removing %s due to failed signature check.' % rpmfn)
else:
my.logger.warning('Removing %s due to failed signature check: %s' % rpmfn)
os.unlink(pkg.localpath)
exit_code = 1
continue
my.closeRpmDB()
sys.exit(exit_code)
if __name__ == "__main__":
main()
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_116_0 |
crossvul-python_data_bad_3539_1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Proxy AMI-related calls from cloud controller to objectstore service."""
import binascii
import os
import shutil
import tarfile
import tempfile
from xml.etree import ElementTree
import boto.s3.connection
import eventlet
from nova import crypto
import nova.db.api
from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova import utils
from nova.api.ec2 import ec2utils
# Module-level logger for s3-backed image registration.
LOG = logging.getLogger("nova.image.s3")

# Global flag registry; the flags below configure where encrypted EC2
# images are staged locally and which credentials reach the objectstore.
FLAGS = flags.FLAGS
flags.DEFINE_string('image_decryption_dir', '/tmp',
                    'parent dir for tempdir used for image decryption')
flags.DEFINE_string('s3_access_key', 'notchecked',
                    'access key to use for s3 server for images')
flags.DEFINE_string('s3_secret_key', 'notchecked',
                    'secret key to use for s3 server for images')
class S3ImageService(object):
    """Wraps an existing image service to support s3 based register.

    EC2-style integer image ids are mapped to the wrapped service's uuids
    through the nova db s3_images table; callers see integer ids while the
    wrapped service is always addressed by uuid.
    """

    def __init__(self, service=None, *args, **kwargs):
        # Fall back to the configured default image service when none given.
        self.service = service or image.get_default_image_service()
        self.service.__init__(*args, **kwargs)

    def get_image_uuid(self, context, image_id):
        """Return the backing-store uuid for an integer s3 image id."""
        return nova.db.api.s3_image_get(context, image_id)['uuid']

    def get_image_id(self, context, image_uuid):
        """Return the integer s3 image id for a backing-store uuid."""
        return nova.db.api.s3_image_get_by_uuid(context, image_uuid)['id']

    def _create_image_id(self, context, image_uuid):
        """Allocate a new integer s3 image id for *image_uuid*."""
        return nova.db.api.s3_image_create(context, image_uuid)['id']

    def _translate_uuids_to_ids(self, context, images):
        return [self._translate_uuid_to_id(context, img) for img in images]

    def _translate_uuid_to_id(self, context, image):
        """Return a copy of *image* with uuids replaced by integer ids.

        Covers the image's own id plus its kernel_id/ramdisk_id properties;
        unknown uuids get a fresh id allocated.
        """
        def _find_or_create(image_uuid):
            if image_uuid is None:
                return
            try:
                return self.get_image_id(context, image_uuid)
            except exception.NotFound:
                return self._create_image_id(context, image_uuid)

        image_copy = image.copy()

        try:
            image_id = image_copy['id']
        except KeyError:
            pass
        else:
            image_copy['id'] = _find_or_create(image_id)

        for prop in ['kernel_id', 'ramdisk_id']:
            try:
                image_uuid = image_copy['properties'][prop]
            except (KeyError, ValueError):
                pass
            else:
                image_copy['properties'][prop] = _find_or_create(image_uuid)

        return image_copy

    def create(self, context, metadata, data=None):
        """Create an image.

        metadata['properties'] should contain image_location.
        """
        image = self._s3_create(context, metadata)
        return image

    def delete(self, context, image_id):
        image_uuid = self.get_image_uuid(context, image_id)
        self.service.delete(context, image_uuid)

    def update(self, context, image_id, metadata, data=None):
        image_uuid = self.get_image_uuid(context, image_id)
        image = self.service.update(context, image_uuid, metadata, data)
        return self._translate_uuid_to_id(context, image)

    def index(self, context):
        #NOTE(bcwaldon): sort asc to make sure we assign lower ids
        # to older images
        images = self.service.index(context, sort_dir='asc')
        return self._translate_uuids_to_ids(context, images)

    def detail(self, context):
        #NOTE(bcwaldon): sort asc to make sure we assign lower ids
        # to older images
        images = self.service.detail(context, sort_dir='asc')
        return self._translate_uuids_to_ids(context, images)

    def show(self, context, image_id):
        image_uuid = self.get_image_uuid(context, image_id)
        image = self.service.show(context, image_uuid)
        return self._translate_uuid_to_id(context, image)

    def show_by_name(self, context, name):
        image = self.service.show_by_name(context, name)
        return self._translate_uuid_to_id(context, image)

    def get(self, context, image_id):
        image_uuid = self.get_image_uuid(context, image_id)
        # BUGFIX: the original called self.get(self, context, image_uuid),
        # recursing into itself with the wrong arguments; delegate to the
        # wrapped service like every other accessor.
        return self.service.get(context, image_uuid)

    @staticmethod
    def _conn(context):
        # NOTE(vish): access and secret keys for s3 server are not
        # checked in nova-objectstore
        access = FLAGS.s3_access_key
        secret = FLAGS.s3_secret_key
        calling = boto.s3.connection.OrdinaryCallingFormat()
        return boto.s3.connection.S3Connection(aws_access_key_id=access,
                                               aws_secret_access_key=secret,
                                               is_secure=False,
                                               calling_format=calling,
                                               port=FLAGS.s3_port,
                                               host=FLAGS.s3_host)

    @staticmethod
    def _download_file(bucket, filename, local_dir):
        key = bucket.get_key(filename)
        # SECURITY(CWE-22): *filename* comes from an untrusted manifest;
        # keep only its basename so it cannot escape local_dir via '../'
        # or absolute-path components.
        local_filename = os.path.join(local_dir, os.path.basename(filename))
        key.get_contents_to_filename(local_filename)
        return local_filename

    def _s3_parse_manifest(self, context, metadata, manifest):
        """Parse an EC2 image manifest and register a pending image.

        Returns (parsed_manifest, image, image_uuid): *image* carries the
        integer s3 id presented to the caller, while *image_uuid* lets the
        caller keep talking to the wrapped image service.
        """
        manifest = ElementTree.fromstring(manifest)
        image_format = 'ami'
        image_type = 'machine'

        try:
            kernel_id = manifest.find('machine_configuration/kernel_id').text
            if kernel_id == 'true':
                # 'true' means the manifest itself describes a kernel image.
                image_format = 'aki'
                image_type = 'kernel'
                kernel_id = None
        except Exception:
            kernel_id = None

        try:
            ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
            if ramdisk_id == 'true':
                image_format = 'ari'
                image_type = 'ramdisk'
                ramdisk_id = None
        except Exception:
            ramdisk_id = None

        try:
            arch = manifest.find('machine_configuration/architecture').text
        except Exception:
            arch = 'x86_64'

        # NOTE(yamahata):
        # EC2 ec2-budlne-image --block-device-mapping accepts
        # <virtual name>=<device name> where
        # virtual name = {ami, root, swap, ephemeral<N>}
        # where N is no negative integer
        # device name = the device name seen by guest kernel.
        # They are converted into
        # block_device_mapping/mapping/{virtual, device}
        #
        # Do NOT confuse this with ec2-register's block device mapping
        # argument.
        mappings = []
        try:
            block_device_mapping = manifest.findall('machine_configuration/'
                                                    'block_device_mapping/'
                                                    'mapping')
            for bdm in block_device_mapping:
                mappings.append({'virtual': bdm.find('virtual').text,
                                 'device': bdm.find('device').text})
        except Exception:
            mappings = []

        properties = metadata['properties']
        properties['project_id'] = context.project_id
        properties['architecture'] = arch

        def _translate_dependent_image_id(image_key, image_id):
            image_id = ec2utils.ec2_id_to_id(image_id)
            image_uuid = self.get_image_uuid(context, image_id)
            # BUGFIX: store under the caller-supplied key; the original
            # always wrote properties['image_id'], so when both kernel and
            # ramdisk were present one clobbered the other and neither was
            # recorded under its own key.
            properties[image_key] = image_uuid

        if kernel_id:
            _translate_dependent_image_id('kernel_id', kernel_id)

        if ramdisk_id:
            _translate_dependent_image_id('ramdisk_id', ramdisk_id)

        if mappings:
            properties['mappings'] = mappings

        metadata.update({'disk_format': image_format,
                         'container_format': image_format,
                         'status': 'queued',
                         'is_public': False,
                         'properties': properties})
        metadata['properties']['image_state'] = 'pending'

        #TODO(bcwaldon): right now, this removes user-defined ids.
        # We need to re-enable this.
        image_id = metadata.pop('id', None)

        image = self.service.create(context, metadata)

        # extract the new uuid and generate an int id to present back to user
        image_uuid = image['id']
        image['id'] = self._create_image_id(context, image_uuid)

        # return image_uuid so the caller can still make use of image_service
        return manifest, image, image_uuid

    def _s3_create(self, context, metadata):
        """Gets a manifext from s3 and makes an image.

        The heavy lifting (download, decrypt, untar, upload) happens in a
        greenthread; the pending image record is returned immediately and
        its image_state property tracks progress/failure.
        """
        image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)

        image_location = metadata['properties']['image_location']
        bucket_name = image_location.split('/')[0]
        manifest_path = image_location[len(bucket_name) + 1:]
        bucket = self._conn(context).get_bucket(bucket_name)
        key = bucket.get_key(manifest_path)
        manifest = key.get_contents_as_string()

        manifest, image, image_uuid = self._s3_parse_manifest(context,
                                                              metadata,
                                                              manifest)

        def delayed_create():
            """This handles the fetching and decrypting of the part files."""
            log_vars = {'image_location': image_location,
                        'image_path': image_path}
            metadata['properties']['image_state'] = 'downloading'
            self.service.update(context, image_uuid, metadata)

            try:
                parts = []
                elements = manifest.find('image').getiterator('filename')
                for fn_element in elements:
                    part = self._download_file(bucket,
                                               fn_element.text,
                                               image_path)
                    parts.append(part)

                # NOTE(vish): this may be suboptimal, should we use cat?
                enc_filename = os.path.join(image_path, 'image.encrypted')
                with open(enc_filename, 'w') as combined:
                    for filename in parts:
                        with open(filename) as part:
                            shutil.copyfileobj(part, combined)
            except Exception:
                LOG.exception(_("Failed to download %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_download'
                self.service.update(context, image_uuid, metadata)
                return

            metadata['properties']['image_state'] = 'decrypting'
            self.service.update(context, image_uuid, metadata)

            try:
                hex_key = manifest.find('image/ec2_encrypted_key').text
                encrypted_key = binascii.a2b_hex(hex_key)
                hex_iv = manifest.find('image/ec2_encrypted_iv').text
                encrypted_iv = binascii.a2b_hex(hex_iv)

                # FIXME(vish): grab key from common service so this can run on
                # any host.
                cloud_pk = crypto.key_path(context.project_id)

                dec_filename = os.path.join(image_path, 'image.tar.gz')
                self._decrypt_image(enc_filename, encrypted_key,
                                    encrypted_iv, cloud_pk,
                                    dec_filename)
            except Exception:
                LOG.exception(_("Failed to decrypt %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_decrypt'
                self.service.update(context, image_uuid, metadata)
                return

            metadata['properties']['image_state'] = 'untarring'
            self.service.update(context, image_uuid, metadata)

            try:
                unz_filename = self._untarzip_image(image_path, dec_filename)
            except Exception:
                LOG.exception(_("Failed to untar %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_untar'
                self.service.update(context, image_uuid, metadata)
                return

            metadata['properties']['image_state'] = 'uploading'
            self.service.update(context, image_uuid, metadata)
            try:
                with open(unz_filename) as image_file:
                    self.service.update(context, image_uuid,
                                        metadata, image_file)
            except Exception:
                LOG.exception(_("Failed to upload %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_upload'
                self.service.update(context, image_uuid, metadata)
                return

            metadata['properties']['image_state'] = 'available'
            metadata['status'] = 'active'
            self.service.update(context, image_uuid, metadata)

            shutil.rmtree(image_path)

        eventlet.spawn_n(delayed_create)

        return image

    @staticmethod
    def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
                       cloud_private_key, decrypted_filename):
        """Decrypt an EC2 bundle with openssl; raises exception.Error on failure."""
        key, err = utils.execute('openssl',
                                 'rsautl',
                                 '-decrypt',
                                 '-inkey', '%s' % cloud_private_key,
                                 process_input=encrypted_key,
                                 check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt private key: %s')
                                  % err)
        iv, err = utils.execute('openssl',
                                'rsautl',
                                '-decrypt',
                                '-inkey', '%s' % cloud_private_key,
                                process_input=encrypted_iv,
                                check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt initialization '
                                    'vector: %s') % err)

        _out, err = utils.execute('openssl', 'enc',
                                  '-d', '-aes-128-cbc',
                                  '-in', '%s' % (encrypted_filename,),
                                  '-K', '%s' % (key,),
                                  '-iv', '%s' % (iv,),
                                  '-out', '%s' % (decrypted_filename,),
                                  check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt image file '
                                    '%(image_file)s: %(err)s') %
                                  {'image_file': encrypted_filename,
                                   'err': err})

    @staticmethod
    def _test_for_malicious_tarball(path, filename):
        """Raise if extracting *filename* could write outside *path* (CWE-22)."""
        # Compare resolved member paths against the directory prefix with a
        # trailing separator, so /tmp/img cannot be escaped into /tmp/img-evil.
        base = os.path.join(os.path.realpath(path), '')
        tar_file = tarfile.open(filename, 'r|gz')
        try:
            for n in tar_file.getnames():
                dest = os.path.realpath(os.path.join(path, n))
                if not (dest + os.sep).startswith(base):
                    raise exception.Error(_('Unsafe filenames in image'))
        finally:
            tar_file.close()

    @staticmethod
    def _untarzip_image(path, filename):
        """Extract the .tar.gz *filename* into *path*; return the image file path."""
        # SECURITY(CWE-22): the original extracted an attacker-supplied
        # tarball unchecked, letting '../' members write outside *path*.
        S3ImageService._test_for_malicious_tarball(path, filename)
        tar_file = tarfile.open(filename, 'r|gz')
        tar_file.extractall(path)
        image_file = tar_file.getnames()[0]
        tar_file.close()
        return os.path.join(path, image_file)
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_3539_1 |
crossvul-python_data_good_4110_0 | """ Regenerate golden-master """
import shutil
from pathlib import Path
from typer.testing import CliRunner
from openapi_python_client.cli import app
if __name__ == "__main__":
    # Regenerate the golden-master client from the bundled OpenAPI document.
    runner = CliRunner()
    here = Path(__file__).parent
    openapi_path = here / "fastapi_app" / "openapi.json"
    config_path = here / "config.yml"
    gm_path = here / "golden-master"
    output_path = Path.cwd() / "my-test-api-client"

    # Start from a clean slate so stale files never leak into the new master.
    for stale in (gm_path, output_path):
        shutil.rmtree(stale, ignore_errors=True)

    result = runner.invoke(app, [f"--config={config_path}", "generate", f"--path={openapi_path}"])

    if result.stdout:
        print(result.stdout)
    if result.exception:
        raise result.exception

    # Promote the freshly generated client to be the new golden master.
    output_path.rename(gm_path)
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_4110_0 |
crossvul-python_data_bad_4110_0 | """ Regenerate golden-master """
import shutil
from pathlib import Path
from typer.testing import CliRunner
from openapi_python_client.cli import app
if __name__ == "__main__":
    # Refresh the app's OpenAPI document before regenerating the client.
    from .fastapi_app import generate_openapi_json

    generate_openapi_json()

    runner = CliRunner()
    here = Path(__file__).parent
    openapi_path = here / "fastapi_app" / "openapi.json"
    config_path = here / "config.yml"
    gm_path = here / "golden-master"
    output_path = Path.cwd() / "my-test-api-client"

    # Start from a clean slate so stale files never leak into the new master.
    for stale in (gm_path, output_path):
        shutil.rmtree(stale, ignore_errors=True)

    result = runner.invoke(app, [f"--config={config_path}", "generate", f"--path={openapi_path}"])

    if result.stdout:
        print(result.stdout)
    if result.exception:
        raise result.exception

    # Promote the freshly generated client to be the new golden master.
    output_path.rename(gm_path)
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_4110_0 |
crossvul-python_data_good_3538_1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Proxy AMI-related calls from cloud controller to objectstore service."""
import binascii
import os
import shutil
import tarfile
import tempfile
from xml.etree import ElementTree
import boto.s3.connection
import eventlet
from nova import crypto
from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova import utils
from nova.image import service
from nova.api.ec2 import ec2utils
# Module-level logger for s3-backed image registration.
LOG = logging.getLogger("nova.image.s3")

# Global flag registry; the flags below configure where encrypted EC2
# images are staged locally and which credentials reach the objectstore.
FLAGS = flags.FLAGS
flags.DEFINE_string('image_decryption_dir', '/tmp',
                    'parent dir for tempdir used for image decryption')
flags.DEFINE_string('s3_access_key', 'notchecked',
                    'access key to use for s3 server for images')
flags.DEFINE_string('s3_secret_key', 'notchecked',
                    'secret key to use for s3 server for images')
class S3ImageService(service.BaseImageService):
    """Wraps an existing image service to support s3 based register.

    Delegates the normal CRUD operations to the wrapped service and adds
    the EC2 register flow: fetch an encrypted bundle from s3, decrypt it,
    untar it, and upload the result.
    """

    def __init__(self, service=None, *args, **kwargs):
        # Fall back to the configured default image service when none given.
        self.service = service or image.get_default_image_service()
        self.service.__init__(*args, **kwargs)

    def create(self, context, metadata, data=None):
        """Create an image.

        metadata['properties'] should contain image_location.
        """
        image = self._s3_create(context, metadata)
        return image

    def delete(self, context, image_id):
        self.service.delete(context, image_id)

    def update(self, context, image_id, metadata, data=None):
        image = self.service.update(context, image_id, metadata, data)
        return image

    def index(self, context):
        return self.service.index(context)

    def detail(self, context):
        return self.service.detail(context)

    def show(self, context, image_id):
        return self.service.show(context, image_id)

    def show_by_name(self, context, name):
        return self.service.show_by_name(context, name)

    @staticmethod
    def _conn(context):
        # NOTE(vish): access and secret keys for s3 server are not
        # checked in nova-objectstore
        access = FLAGS.s3_access_key
        secret = FLAGS.s3_secret_key
        calling = boto.s3.connection.OrdinaryCallingFormat()
        return boto.s3.connection.S3Connection(aws_access_key_id=access,
                                               aws_secret_access_key=secret,
                                               is_secure=False,
                                               calling_format=calling,
                                               port=FLAGS.s3_port,
                                               host=FLAGS.s3_host)

    @staticmethod
    def _download_file(bucket, filename, local_dir):
        key = bucket.get_key(filename)
        # basename() keeps a crafted manifest filename from writing outside
        # local_dir (CWE-22).
        local_filename = os.path.join(local_dir, os.path.basename(filename))
        key.get_contents_to_filename(local_filename)
        return local_filename

    def _s3_parse_manifest(self, context, metadata, manifest):
        """Parse an EC2 image manifest and register a pending image.

        Returns (parsed_manifest, image) where *image* is the record created
        in the wrapped image service with image_state 'pending'.
        """
        manifest = ElementTree.fromstring(manifest)
        image_format = 'ami'
        image_type = 'machine'

        try:
            kernel_id = manifest.find('machine_configuration/kernel_id').text
            if kernel_id == 'true':
                # 'true' means the manifest itself describes a kernel image.
                image_format = 'aki'
                image_type = 'kernel'
                kernel_id = None
        except Exception:
            kernel_id = None

        try:
            ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
            if ramdisk_id == 'true':
                image_format = 'ari'
                image_type = 'ramdisk'
                ramdisk_id = None
        except Exception:
            ramdisk_id = None

        try:
            arch = manifest.find('machine_configuration/architecture').text
        except Exception:
            arch = 'x86_64'

        # NOTE(yamahata):
        # EC2 ec2-budlne-image --block-device-mapping accepts
        # <virtual name>=<device name> where
        # virtual name = {ami, root, swap, ephemeral<N>}
        # where N is no negative integer
        # device name = the device name seen by guest kernel.
        # They are converted into
        # block_device_mapping/mapping/{virtual, device}
        #
        # Do NOT confuse this with ec2-register's block device mapping
        # argument.
        mappings = []
        try:
            block_device_mapping = manifest.findall('machine_configuration/'
                                                    'block_device_mapping/'
                                                    'mapping')
            for bdm in block_device_mapping:
                mappings.append({'virtual': bdm.find('virtual').text,
                                 'device': bdm.find('device').text})
        except Exception:
            mappings = []

        properties = metadata['properties']
        properties['project_id'] = context.project_id
        properties['architecture'] = arch

        if kernel_id:
            properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)

        if ramdisk_id:
            properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)

        if mappings:
            properties['mappings'] = mappings

        metadata.update({'disk_format': image_format,
                         'container_format': image_format,
                         'status': 'queued',
                         'is_public': False,
                         'properties': properties})
        metadata['properties']['image_state'] = 'pending'
        image = self.service.create(context, metadata)
        return manifest, image

    def _s3_create(self, context, metadata):
        """Gets a manifext from s3 and makes an image.

        The heavy lifting (download, decrypt, untar, upload) runs in a
        greenthread; the pending record is returned immediately and its
        image_state property tracks progress/failure.
        """
        image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)

        image_location = metadata['properties']['image_location']
        bucket_name = image_location.split('/')[0]
        manifest_path = image_location[len(bucket_name) + 1:]
        bucket = self._conn(context).get_bucket(bucket_name)
        key = bucket.get_key(manifest_path)
        manifest = key.get_contents_as_string()

        manifest, image = self._s3_parse_manifest(context, metadata, manifest)
        image_id = image['id']

        def delayed_create():
            """This handles the fetching and decrypting of the part files."""
            log_vars = {'image_location': image_location,
                        'image_path': image_path}
            metadata['properties']['image_state'] = 'downloading'
            self.service.update(context, image_id, metadata)

            try:
                parts = []
                elements = manifest.find('image').getiterator('filename')
                for fn_element in elements:
                    part = self._download_file(bucket,
                                               fn_element.text,
                                               image_path)
                    parts.append(part)

                # NOTE(vish): this may be suboptimal, should we use cat?
                enc_filename = os.path.join(image_path, 'image.encrypted')
                with open(enc_filename, 'w') as combined:
                    for filename in parts:
                        with open(filename) as part:
                            shutil.copyfileobj(part, combined)
            except Exception:
                LOG.exception(_("Failed to download %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_download'
                self.service.update(context, image_id, metadata)
                return

            metadata['properties']['image_state'] = 'decrypting'
            self.service.update(context, image_id, metadata)

            try:
                hex_key = manifest.find('image/ec2_encrypted_key').text
                encrypted_key = binascii.a2b_hex(hex_key)
                hex_iv = manifest.find('image/ec2_encrypted_iv').text
                encrypted_iv = binascii.a2b_hex(hex_iv)

                # FIXME(vish): grab key from common service so this can run on
                # any host.
                cloud_pk = crypto.key_path(context.project_id)

                dec_filename = os.path.join(image_path, 'image.tar.gz')
                self._decrypt_image(enc_filename, encrypted_key,
                                    encrypted_iv, cloud_pk,
                                    dec_filename)
            except Exception:
                LOG.exception(_("Failed to decrypt %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_decrypt'
                self.service.update(context, image_id, metadata)
                return

            metadata['properties']['image_state'] = 'untarring'
            self.service.update(context, image_id, metadata)

            try:
                unz_filename = self._untarzip_image(image_path, dec_filename)
            except Exception:
                LOG.exception(_("Failed to untar %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_untar'
                self.service.update(context, image_id, metadata)
                return

            metadata['properties']['image_state'] = 'uploading'
            self.service.update(context, image_id, metadata)
            try:
                with open(unz_filename) as image_file:
                    self.service.update(context, image_id,
                                        metadata, image_file)
            except Exception:
                LOG.exception(_("Failed to upload %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_upload'
                self.service.update(context, image_id, metadata)
                return

            metadata['properties']['image_state'] = 'available'
            metadata['status'] = 'active'
            self.service.update(context, image_id, metadata)

            shutil.rmtree(image_path)

        eventlet.spawn_n(delayed_create)

        return image

    @staticmethod
    def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
                       cloud_private_key, decrypted_filename):
        """Decrypt an EC2 bundle with openssl; raises exception.Error on failure."""
        key, err = utils.execute('openssl',
                                 'rsautl',
                                 '-decrypt',
                                 '-inkey', '%s' % cloud_private_key,
                                 process_input=encrypted_key,
                                 check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt private key: %s')
                                  % err)
        iv, err = utils.execute('openssl',
                                'rsautl',
                                '-decrypt',
                                '-inkey', '%s' % cloud_private_key,
                                process_input=encrypted_iv,
                                check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt initialization '
                                    'vector: %s') % err)

        _out, err = utils.execute('openssl', 'enc',
                                  '-d', '-aes-128-cbc',
                                  '-in', '%s' % (encrypted_filename,),
                                  '-K', '%s' % (key,),
                                  '-iv', '%s' % (iv,),
                                  '-out', '%s' % (decrypted_filename,),
                                  check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt image file '
                                    '%(image_file)s: %(err)s') %
                                  {'image_file': encrypted_filename,
                                   'err': err})

    @staticmethod
    def _test_for_malicious_tarball(path, filename):
        """Raises exception if extracting tarball would escape extract path"""
        # BUGFIX: the original compared abspath(...).startswith(path), which
        # accepts escapes into sibling directories sharing the prefix
        # (/tmp/img vs /tmp/img-evil) and does not resolve symlinks.
        # Compare realpaths against the directory prefix including a
        # trailing separator instead.
        base = os.path.join(os.path.realpath(path), '')
        tar_file = tarfile.open(filename, 'r|gz')
        try:
            for n in tar_file.getnames():
                dest = os.path.realpath(os.path.join(path, n))
                if not (dest + os.sep).startswith(base):
                    raise exception.Error(_('Unsafe filenames in image'))
        finally:
            tar_file.close()

    @staticmethod
    def _untarzip_image(path, filename):
        """Extract the .tar.gz *filename* into *path*; return the image file path."""
        S3ImageService._test_for_malicious_tarball(path, filename)
        tar_file = tarfile.open(filename, 'r|gz')
        tar_file.extractall(path)
        image_file = tar_file.getnames()[0]
        tar_file.close()
        return os.path.join(path, image_file)
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_3538_1 |
crossvul-python_data_bad_4657_0 | #!/usr/bin/env python3
# -*-coding:UTF-8 -*
"""
The ZMQ_Feed_Q Module
=====================
This module is consuming the Redis-list created by the ZMQ_Feed_Q Module,
And save the paste on disk to allow others modules to work on them.
..todo:: Be able to choose to delete or not the saved paste after processing.
..todo:: Store the empty paste (unprocessed) somewhere in Redis.
..note:: Module ZMQ_Something_Q and ZMQ_Something are closely bound, always put
the same Subscriber name in both of them.
Requirements
------------
*Need running Redis instances.
*Need the ZMQ_Feed_Q Module running to be able to work properly.
"""
import base64
import os
import time
import uuid
from pubsublogger import publisher
from Helper import Process
import magic
def rreplace(s, old, new, occurrence):
    """Return *s* with its last *occurrence* matches of *old* replaced by *new*."""
    # rsplit cuts at the rightmost separators, so rejoining with the
    # replacement swaps exactly those trailing matches.
    return new.join(s.rsplit(old, occurrence))
if __name__ == '__main__':
    publisher.port = 6380
    publisher.channel = 'Script'

    processed_paste = 0
    time_1 = time.time()

    config_section = 'Global'

    p = Process(config_section)

    # Root directory under which every received item must be written.
    PASTES_FOLDER = os.path.join(os.environ['AIL_HOME'], p.config.get("Directories", "pastes"))
    PASTES_FOLDERS = PASTES_FOLDER + '/'

    # LOGGING #
    publisher.info("Feed Script started to receive & publish.")

    while True:

        message = p.get_from_set()
        # Recovering the streamed message informations.
        if message is not None:
            splitted = message.split()
            if len(splitted) == 2:
                paste, gzip64encoded = splitted
            else:
                # TODO Store the name of the empty paste inside a Redis-list.
                print("Empty Paste: not processed")
                publisher.debug("Empty Paste: {0} not processed".format(message))
                continue
        else:
            # Nothing queued: report throughput every ~30s and idle briefly.
            print("Empty Queues: Waiting...")
            if int(time.time() - time_1) > 30:
                to_print = 'Global; ; ; ;glob Processed {0} paste(s)'.format(processed_paste)
                print(to_print)
                #publisher.info(to_print)
                time_1 = time.time()
                processed_paste = 0
            time.sleep(1)
            continue

        # Rename over-long basenames so the filesystem accepts them.
        file_name_paste = paste.split('/')[-1]
        if len(file_name_paste) > 255:
            new_file_name_paste = '{}{}.gz'.format(file_name_paste[:215], str(uuid.uuid4()))
            paste = rreplace(paste, file_name_paste, new_file_name_paste, 1)

        # Creating the full filepath
        filename = os.path.join(PASTES_FOLDER, paste)
        filename = os.path.realpath(filename)

        # SECURITY(CWE-22): 'paste' comes from an untrusted feed; refuse any
        # name that resolves outside the pastes directory (e.g. '../../etc'
        # or an absolute path), otherwise the decoded payload could be
        # written anywhere on disk.
        if not filename.startswith(os.path.join(os.path.realpath(PASTES_FOLDER), '')):
            print('Path traversal detected, item not processed')
            publisher.warning('Global; Path traversal detected: {0}'.format(paste))
            continue

        dirname = os.path.dirname(filename)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        decoded = base64.standard_b64decode(gzip64encoded)

        with open(filename, 'wb') as f:
            f.write(decoded)

        # remove PASTES_FOLDER from item path (crawled item + submited)
        if PASTES_FOLDERS in paste:
            paste = paste.replace(PASTES_FOLDERS, '', 1)

        p.populate_set_out(paste)
        processed_paste += 1
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_4657_0 |
crossvul-python_data_bad_495_0 | # -*- coding: utf-8 -*-
##############################################################################
# 2011 E2OpenPlugins #
# #
# This file is open source software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
##############################################################################
import os
import re
import glob
from urllib import quote
import json
from twisted.web import static, resource, http
from Components.config import config
from Tools.Directories import fileExists
def new_getRequestHostname(self):
    """Return the hostname this request was addressed to, without the port.

    Falls back to the transport-level host when no Host header is present.
    Bracketed IPv6 literals keep their brackets.
    """
    host = self.getHeader(b'host')
    if not host:
        return self.getHost().host.encode('ascii')
    if host[0] == '[':
        # IPv6 literal such as "[::1]:80": keep everything up to the bracket.
        return host.split(']', 1)[0] + "]"
    return host.split(':', 1)[0].encode('ascii')
http.Request.getRequestHostname = new_getRequestHostname
class FileController(resource.Resource):
    """Legacy /file handler: download, stream (as .m3u) or list files.

    Query parameters:
        action: one of "download" (default), "stream", "delete"
        file:   path of the file to act on
        dir:    directory to list (with optional glob "pattern")
    """
    def render(self, request):
        action = "download"
        if "action" in request.args:
            action = request.args["action"][0]
        if "file" in request.args:
            filename = request.args["file"][0].decode('utf-8', 'ignore').encode('utf-8')
            # Collapse duplicate leading slashes after resolving symlinks.
            filename = re.sub("^/+", "/", os.path.realpath(filename))
            # NOTE(review): `filename` is taken straight from the query string
            # and is not restricted to any base directory, so any readable
            # file on the system can be fetched (path traversal, CWE-22) —
            # confirm intent or restrict to a whitelist of roots.
            if not os.path.exists(filename):
                return "File '%s' not found" % (filename)
            if action == "stream":
                name = "stream"
                if "name" in request.args:
                    name = request.args["name"][0]
                port = config.OpenWebif.port.value
                proto = 'http'
                if request.isSecure():
                    port = config.OpenWebif.https_port.value
                    proto = 'https'
                # Prefer the port the client actually used, if present in Host.
                ourhost = request.getHeader('host')
                m = re.match('.+\:(\d+)$', ourhost)
                if m is not None:
                    port = m.group(1)
                response = "#EXTM3U\n#EXTVLCOPT--http-reconnect=true\n#EXTINF:-1,%s\n%s://%s:%s/file?action=download&file=%s" % (name, proto, request.getRequestHostname(), port, quote(filename))
                request.setHeader("Content-Disposition", 'attachment;filename="%s.m3u"' % name)
                request.setHeader("Content-Type", "application/x-mpegurl")
                return response
            elif action == "delete":
                request.setResponseCode(http.OK)
                return "TODO: DELETE FILE: %s" % (filename)
            elif action == "download":
                request.setHeader("Content-Disposition", "attachment;filename=\"%s\"" % (filename.split('/')[-1]))
                rfile = static.File(filename, defaultType = "application/octet-stream")
                return rfile.render(request)
            else:
                return "wrong action parameter"
        if "dir" in request.args:
            path = request.args["dir"][0]
            pattern = '*'
            data = []
            if "pattern" in request.args:
                pattern = request.args["pattern"][0]
            directories = []
            files = []
            if fileExists(path):
                try:
                    files = glob.glob(path+'/'+pattern)
                except:
                    files = []
                files.sort()
                # Split the glob result into directories and plain files.
                tmpfiles = files[:]
                for x in tmpfiles:
                    if os.path.isdir(x):
                        directories.append(x + '/')
                        files.remove(x)
                data.append({"result": True,"dirs": directories,"files": files})
            else:
                data.append({"result": False,"message": "path %s not exits" % (path)})
            request.setHeader("content-type", "application/json; charset=utf-8")
            return json.dumps(data, indent=2)
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_495_0 |
crossvul-python_data_good_495_1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RESTful Filesystem access using HTTP
------------------------------------
This controller and helper classes exposes parts or all of the server's
filesystem. Means to retrieve and delete files are provided as well as the
ability to list folder contents.
The generated responses are returned as JSON data with appropriate HTTP headers.
Output will be compressed using gzip most of the times.
Example calls using curl
++++++++++++++++++++++++
The following examples assume that the FileController instance is accessible
as '/file' on 'localhost', port 18888 (http://localhost:18888/file).
Fetch list of files and folders in root folder:
curl --noproxy localhost -iv http://localhost:18888/file
Fetch example file 'example.txt'
curl --noproxy localhost -iv http://localhost:18888/file/example.txt
Fetch gzipped example file 'example.txt'
curl --compressed -H "Accept-Encoding: gzip" --noproxy localhost -iv http://localhost:18888/file/example.txt
Delete example file 'example.txt'
curl --noproxy localhost -iv -X DELETE http://localhost:18888/file/example.txt
"""
import os
import json
import glob
import re
import urlparse
import twisted.web.static
from twisted.web import http
from utilities import MANY_SLASHES_REGEX
import file
#: default path from which files will be served
DEFAULT_ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
#: CORS - HTTP headers the client may use
CORS_ALLOWED_CLIENT_HEADERS = [
'Content-Type',
]
#: CORS - HTTP methods the client may use
CORS_ALLOWED_METHODS_DEFAULT = ['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS']
#: CORS - default origin header value
CORS_DEFAULT_ALLOW_ORIGIN = '*'
#: CORS - HTTP headers the server will send as part of OPTIONS response
CORS_DEFAULT = {
'Access-Control-Allow-Origin': CORS_DEFAULT_ALLOW_ORIGIN,
'Access-Control-Allow-Credentials': 'true',
'Access-Control-Max-Age': '86400',
'Access-Control-Allow-Methods': ','.join(CORS_ALLOWED_METHODS_DEFAULT),
'Access-Control-Allow-Headers': ', '.join(CORS_ALLOWED_CLIENT_HEADERS)
}
#: paths where file delete operations shall be allowed
DELETE_WHITELIST = [
'/media',
]
class FileController(twisted.web.resource.Resource):
    """RESTful filesystem access: GET file contents / folder listings,
    DELETE files (guarded by a whitelist), JSON error responses.

    Fixes relative to the previous revision:
      * `_existing_path_or_bust` now collapses '..' segments and rejects
        any path that would escape the configured root (path traversal).
      * `render_DELETE` now allows a path matching *any* whitelist prefix
        instead of requiring it to match *every* prefix.
    """
    isLeaf = True
    _override_args = (
        'resource_prefix', 'root', 'do_delete', 'delete_whitelist')
    _resource_prefix = '/file'
    _root = os.path.abspath(os.path.dirname(__file__))
    _do_delete = False
    _delete_whitelist = DELETE_WHITELIST
    never_gzip_extensions = ('.ts',)

    def __init__(self, *args, **kwargs):
        """
        Default Constructor.

        Args:
            resource_prefix: Prefix value for this controller instance.
                Default is :py:data:`FileController._resource_prefix`
            root: Root path of files to be served.
                Default is the path where the current file is located
            do_delete: Try to actually delete files?
                Default is False.
            delete_whitelist: Folder prefixes where delete operations are
                allowed _at all_. Default is :py:data:`DELETE_WHITELIST`
        """
        if args:
            # Positional arguments map onto _override_args in order.
            for key, value in zip(self._override_args, args):
                kwargs[key] = value
        for arg_name in self._override_args:
            if kwargs.get(arg_name) is not None:
                attr_name = '_{:s}'.format(arg_name)
                setattr(self, attr_name, kwargs.get(arg_name))
        self.session = kwargs.get("session")

    def _json_response(self, request, data):
        """
        Create a JSON representation for *data* and set HTTP headers indicating
        that JSON encoded data is returned.

        Args:
            request (twisted.web.server.Request): HTTP request object
            data: response content

        Returns:
            JSON representation of *data* with appropriate HTTP headers
        """
        request.setHeader("content-type", "application/json; charset=utf-8")
        return json.dumps(data, indent=2)

    def get_response_data_template(self, request):
        """
        Generate a response data :class:`dict` containing default values and
        some request attribute values for debugging purposes.

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            (dict) response template data
        """
        file_path = None
        if request.path.startswith(self._resource_prefix):
            file_path = request.path[len(self._resource_prefix):]
        response_data = {
            "_request": {
                "path": request.path,
                "uri": request.uri,
                "method": request.method,
                "postpath": request.postpath,
                "file_path": file_path,
            },
            "result": False,
        }
        return response_data

    def error_response(self, request, response_code=None, **kwargs):
        """
        Create and return an HTTP error response with data as JSON.

        Args:
            request (twisted.web.server.Request): HTTP request object
            response_code: HTTP Status Code (default is 500)
            **kwargs: additional key/value pairs

        Returns:
            JSON encoded data with appropriate HTTP headers
        """
        if response_code is None:
            response_code = http.INTERNAL_SERVER_ERROR
        response_data = self.get_response_data_template(request)
        response_data.update(**kwargs)
        # Expose the effective configuration to ease debugging.
        response_data['me'] = dict()
        for arg_name in self._override_args:
            attr_name = '_{:s}'.format(arg_name)
            response_data['me'][attr_name] = getattr(self, attr_name)
        request.setResponseCode(response_code)
        return self._json_response(request, response_data)

    def _existing_path_or_bust(self, request):
        """
        Verify that a filesystem location which is contained in *request.path*
        is valid, an existing path and located below the configured root.

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            path

        Raises:
            ValueError: If contained path value is invalid or escapes root.
            IOError: If contained path value is not existing.
        """
        rq_path = urlparse.unquote(request.path)
        if not rq_path.startswith(self._resource_prefix):
            raise ValueError("Invalid Request Path {!r}".format(request.path))
        file_path = os.path.join(
            self._root, rq_path[len(self._resource_prefix) + 1:])
        file_path = re.sub(MANY_SLASHES_REGEX, '/', file_path)
        # Security fix (path traversal): collapse '.'/'..' segments and
        # refuse any result that would leave the configured root folder.
        file_path = os.path.normpath(file_path)
        root_path = os.path.abspath(self._root)
        if not (file_path == root_path or
                file_path.startswith(root_path + os.sep)):
            raise ValueError("Invalid Request Path {!r}".format(request.path))
        if not os.path.exists(file_path):
            raise IOError("Not Found {!r}".format(file_path))
        return file_path

    def render_OPTIONS(self, request):
        """
        Render response for an HTTP OPTIONS request.

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with CORS headers and an empty body
        """
        for key in CORS_DEFAULT:
            request.setHeader(key, CORS_DEFAULT[key])
        return ''

    def render_legacy(self, request):
        """
        Render response for an HTTP GET request. In order to maintain
        backward compatibility this method emulates the behaviour of the
        legacy method implementation.

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        return file.FileController().render(request)

    def _glob(self, path, pattern='*'):
        # Non-recursive listing of *path* filtered by *pattern*.
        if path == '/':
            glob_me = '/' + pattern
        else:
            glob_me = '/'.join((path, pattern))
        return glob.iglob(glob_me)

    def _walk(self, path):
        # Recursive generator over every folder and file below *path*.
        for root, dirs, files in os.walk(path):
            for dir_item in dirs:
                yield os.path.join(root, dir_item)
            for file_item in files:
                yield os.path.join(root, file_item)

    def render_path_listing(self, request, path):
        """
        Generate a file/folder listing of *path*'s contents.

        Args:
            request (twisted.web.server.Request): HTTP request object
            path: folder location

        Returns:
            HTTP response with headers
        """
        response_data = self.get_response_data_template(request)
        response_data.update(
            {
                'result': True,
                'dirs': [],
                'files': [],
            }
        )
        generator = None
        if "pattern" in request.args:
            generator = self._glob(path, request.args["pattern"][0])
        if "recursive" in request.args:
            # "recursive" wins over "pattern" (matches previous behaviour).
            generator = self._walk(path)
        if generator is None:
            generator = self._glob(path)
        for item in generator:
            if os.path.isdir(item):
                response_data['dirs'].append(item)
            else:
                response_data['files'].append(item)
        return self._json_response(request, response_data)

    def render_file(self, request, path):
        """
        Return the contents of file *path*.

        Args:
            request (twisted.web.server.Request): HTTP request object
            path: file path

        Returns:
            HTTP response with headers
        """
        (_, ext) = os.path.splitext(path)
        if ext in self.never_gzip_extensions:
            # hack: remove gzip from the list of supported encodings so
            # stream-like formats (e.g. .ts) are never compressed.
            acceptHeaders = request.requestHeaders.getRawHeaders(
                'accept-encoding', [])
            supported = ','.join(acceptHeaders).split(',')
            request.requestHeaders.setRawHeaders(
                'accept-encoding', list(set(supported) - {'gzip'}))
        result = twisted.web.static.File(
            path, defaultType="application/octet-stream")
        return result.render(request)

    def render_GET(self, request):
        """
        HTTP GET request handler returning

            * legacy response if the query *file* or *dir* parameter is set
            * file contents if *request.path* contains a file path
            * directory listing if *request.path* contains a folder path

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        attic_args = {'file', 'dir'}
        if len(attic_args & set(request.args.keys())) >= 1:
            return self.render_legacy(request)

        request.setHeader(
            'Access-Control-Allow-Origin', CORS_DEFAULT_ALLOW_ORIGIN)
        try:
            target_path = self._existing_path_or_bust(request)
        except ValueError as vexc:
            return self.error_response(
                request, response_code=http.BAD_REQUEST, message=vexc.message)
        except IOError as iexc:
            return self.error_response(
                request, response_code=http.NOT_FOUND, message=iexc.message)

        if os.path.isdir(target_path):
            return self.render_path_listing(request, target_path)
        else:
            return self.render_file(request, target_path)

    def render_POST(self, request):
        """
        HTTP POST request handler (currently NOT implemented).

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        request.setHeader(
            'Access-Control-Allow-Origin', CORS_DEFAULT_ALLOW_ORIGIN)
        return self.error_response(request, response_code=http.NOT_IMPLEMENTED)

    def render_PUT(self, request):
        """
        HTTP PUT request handler (currently NOT implemented).

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        request.setHeader(
            'Access-Control-Allow-Origin', CORS_DEFAULT_ALLOW_ORIGIN)
        return self.error_response(request, response_code=http.NOT_IMPLEMENTED)

    def render_DELETE(self, request):
        """
        HTTP DELETE request handler which may try to delete a file if its
        path's prefix is in :py:data:`FileController._delete_whitelist` and
        :py:data:`FileController._do_delete` is True.

        Args:
            request (twisted.web.server.Request): HTTP request object

        Returns:
            HTTP response with headers
        """
        request.setHeader(
            'Access-Control-Allow-Origin', CORS_DEFAULT_ALLOW_ORIGIN)
        try:
            target_path = self._existing_path_or_bust(request)
        except ValueError as vexc:
            return self.error_response(
                request, response_code=http.BAD_REQUEST, message=vexc.message)
        except IOError as iexc:
            return self.error_response(
                request, response_code=http.NOT_FOUND, message=iexc.message)

        if os.path.isdir(target_path):
            return self.error_response(
                request, response_code=http.NOT_IMPLEMENTED,
                message='Will not remove folder {!r}'.format(target_path))

        # Fixed: the old per-prefix loop rejected the request unless the path
        # matched *every* whitelist entry; it must match *any* one of them.
        # An empty whitelist keeps its old meaning of "no restriction".
        if self._delete_whitelist and not any(
                target_path.startswith(os.path.abspath(prefix))
                for prefix in self._delete_whitelist):
            return self.error_response(request,
                                       response_code=http.FORBIDDEN)

        response_data = self.get_response_data_template(request)
        try:
            response_data['result'] = True
            if self._do_delete:
                os.unlink(target_path)
                message = 'Removed {!r}'.format(target_path)
            else:
                message = 'WOULD remove {!r}'.format(target_path)
            response_data['message'] = message
        except Exception as eexc:
            response_data['message'] = 'Cannot remove {!r}: {!s}'.format(
                target_path, eexc.message)
            request.setResponseCode(http.INTERNAL_SERVER_ERROR)
        return self._json_response(request, response_data)
if __name__ == '__main__':
    # Demo server setups for manual testing.
    from twisted.web.resource import Resource, EncodingResourceWrapper
    from twisted.web.server import Site, GzipEncoderFactory
    from twisted.internet import reactor

    # standard factory example
    factory_s = Site(FileController(DEFAULT_ROOT_PATH))

    # experimental factory
    # Fix: Resource.putChild() expects a single path segment (no slashes)
    # and a Resource *instance* — the old code passed the class itself and
    # slash-containing segments, which could never be routed.
    root = Resource()
    root.putChild("file", FileController(root=DEFAULT_ROOT_PATH))
    factory_r = Site(root)

    # experimental factory: enable gzip compression
    wrapped = EncodingResourceWrapper(
        FileController(
            root=DEFAULT_ROOT_PATH,
            # DANGER, WILL ROBINSON! These values allow deletion of ALL files!
            do_delete=True, delete_whitelist=[]
        ),
        [GzipEncoderFactory()])
    factory_s_gz = Site(wrapped)
    reactor.listenTCP(18888, factory_s_gz)
    reactor.run()
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_495_1 |
crossvul-python_data_good_1777_0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""This class provides the api to talk to the client.
It will then call the cherrymodel, to get the
requested information"""
import os # shouldn't have to list any folder in the future!
import json
import cherrypy
import codecs
import sys
try:
from urllib.parse import unquote
except ImportError:
from backport.urllib.parse import unquote
try:
from urllib import parse
except ImportError:
from backport.urllib import parse
import audiotranscode
from tinytag import TinyTag
from cherrymusicserver import userdb
from cherrymusicserver import log
from cherrymusicserver import albumartfetcher
from cherrymusicserver import service
from cherrymusicserver.pathprovider import readRes
from cherrymusicserver.pathprovider import albumArtFilePath
import cherrymusicserver as cherry
import cherrymusicserver.metainfo as metainfo
from cherrymusicserver.util import Performance, MemoryZipFile
from cherrymusicserver.ext import zipstream
import time
debug = True
@service.user(model='cherrymodel', playlistdb='playlist',
useroptions='useroptions', userdb='users')
class HTTPHandler(object):
    def __init__(self, config):
        """Load the HTML page templates and build the API dispatch table.

        Args:
            config: server configuration object (kept for later use).
        """
        self.config = config
        template_main = 'res/dist/main.html'
        template_login = 'res/login.html'
        template_firstrun = 'res/firstrun.html'
        self.mainpage = readRes(template_main)
        self.loginpage = readRes(template_login)
        self.firstrunpage = readRes(template_firstrun)
        # Maps the "action" name of an /api call to its handler method.
        self.handlers = {
            'search': self.api_search,
            'rememberplaylist': self.api_rememberplaylist,
            'saveplaylist': self.api_saveplaylist,
            'loadplaylist': self.api_loadplaylist,
            'generaterandomplaylist': self.api_generaterandomplaylist,
            'deleteplaylist': self.api_deleteplaylist,
            'getmotd': self.api_getmotd,
            'restoreplaylist': self.api_restoreplaylist,
            'getplayables': self.api_getplayables,
            'getuserlist': self.api_getuserlist,
            'adduser': self.api_adduser,
            'userdelete': self.api_userdelete,
            'userchangepassword': self.api_userchangepassword,
            'showplaylists': self.api_showplaylists,
            'logout': self.api_logout,
            'downloadpls': self.api_downloadpls,
            'downloadm3u': self.api_downloadm3u,
            'getsonginfo': self.api_getsonginfo,
            'getencoders': self.api_getencoders,
            'getdecoders': self.api_getdecoders,
            'transcodingenabled': self.api_transcodingenabled,
            'updatedb': self.api_updatedb,
            'getconfiguration': self.api_getconfiguration,
            'compactlistdir': self.api_compactlistdir,
            'listdir': self.api_listdir,
            'fetchalbumart': self.api_fetchalbumart,
            'fetchalbumarturls': self.api_fetchalbumarturls,
            'albumart_set': self.api_albumart_set,
            'heartbeat': self.api_heartbeat,
            'getuseroptions': self.api_getuseroptions,
            'setuseroption': self.api_setuseroption,
            'changeplaylist': self.api_changeplaylist,
            'downloadcheck': self.api_downloadcheck,
            'setuseroptionfor': self.api_setuseroptionfor,
        }
def issecure(self, url):
return parse.urlparse(url).scheme == 'https'
    def getBaseUrl(self, redirect_unencrypted=False):
        """Return the server's base URL; optionally redirect plain-HTTP
        requests to the HTTPS port when SSL is enabled.

        Raises:
            cherrypy.HTTPRedirect: when *redirect_unencrypted* is True and
                the current request is not secure while SSL is enabled.
        """
        ipAndPort = parse.urlparse(cherrypy.url()).netloc
        is_secure_connection = self.issecure(cherrypy.url())
        ssl_enabled = cherry.config['server.ssl_enabled']
        if ssl_enabled and not is_secure_connection:
            log.d(_('Not secure, redirecting...'))
            # NOTE(review): rindex(':') raises ValueError if netloc carries no
            # port, and mis-splits bare IPv6 literals — confirm inputs.
            ip = ipAndPort[:ipAndPort.rindex(':')]
            url = 'https://' + ip + ':' + str(cherry.config['server.ssl_port'])
            if redirect_unencrypted:
                raise cherrypy.HTTPRedirect(url, 302)
        else:
            url = 'http://' + ipAndPort
        return url
    def index(self, *args, **kwargs):
        """Serve the main page, the login page or the first-run page, and
        handle login / initial-admin-creation form submissions."""
        self.getBaseUrl(redirect_unencrypted=True)
        # First run = no users exist yet.
        firstrun = 0 == self.userdb.getUserCount()
        show_page = self.mainpage  #generated main.html from devel.html
        if 'devel' in kwargs:
            #reload pages everytime in devel mode
            show_page = readRes('res/devel.html')
            self.loginpage = readRes('res/login.html')
            self.firstrunpage = readRes('res/firstrun.html')
        if 'login' in kwargs:
            username = kwargs.get('username', '')
            password = kwargs.get('password', '')
            login_action = kwargs.get('login', '')
            if login_action == 'login':
                self.session_auth(username, password)
                if cherrypy.session['username']:
                    username = cherrypy.session['username']
                    log.i(_('user {name} just logged in.').format(name=username))
            elif login_action == 'create admin user':
                # Only permitted while no user exists at all.
                if firstrun:
                    if username.strip() and password.strip():
                        self.userdb.addUser(username, password, True)
                        self.session_auth(username, password)
                        return show_page
                else:
                    return "No, you can't."
        if firstrun:
            return self.firstrunpage
        else:
            if self.isAuthorized():
                return show_page
            else:
                return self.loginpage
    index.exposed = True
    def isAuthorized(self):
        """Return True when the session belongs to a known user, or when
        localhost auto-login applies; log out on user-id/name mismatch."""
        try:
            sessionUsername = cherrypy.session.get('username', None)
            sessionUserId = cherrypy.session.get('userid', -1)
            nameById = self.userdb.getNameById(sessionUserId)
        except (UnicodeDecodeError, ValueError) as e:
            # workaround for python2/python3 jump, filed bug in cherrypy
            # https://bitbucket.org/cherrypy/cherrypy/issue/1216/sessions-python2-3-compability-unsupported
            log.w(_('''
Dropping all sessions! Try not to change between python 2 and 3,
everybody has to relogin now.'''))
            cherrypy.session.delete()
            sessionUsername = None
        if sessionUsername is None:
            if self.autoLoginActive():
                # Local request: transparently log in as the first user.
                cherrypy.session['username'] = self.userdb.getNameById(1)
                cherrypy.session['userid'] = 1
                cherrypy.session['admin'] = True
                return True
            else:
                return False
        elif sessionUsername != nameById:
            # Stale session (user renamed/deleted): force logout.
            self.api_logout(value=None)
            return False
        return True
def autoLoginActive(self):
is_loopback = cherrypy.request.remote.ip in ('127.0.0.1', '::1')
if is_loopback and cherry.config['server.localhost_auto_login']:
return True
return False
    def session_auth(self, username, password):
        """Authenticate *username*/*password* and store the resulting user
        (or the 'nobody' user) in the session."""
        user = self.userdb.auth(username, password)
        allow_remote = cherry.config['server.permit_remote_admin_login']
        is_loopback = cherrypy.request.remote.ip in ('127.0.0.1', '::1')
        # Remote admin logins are demoted to 'nobody' unless explicitly allowed.
        if not is_loopback and user.isadmin and not allow_remote:
            log.i(_('Rejected remote admin login from user: {name}').format(name=user.name))
            user = userdb.User.nobody()
        cherrypy.session['username'] = user.name
        cherrypy.session['userid'] = user.uid
        cherrypy.session['admin'] = user.isadmin
    def getUserId(self):
        """Return the current session's user id, or '' after expiring a
        session that carries no user id."""
        try:
            return cherrypy.session['userid']
        except KeyError:
            cherrypy.lib.sessions.expire()
            # NOTE(review): this HTTPRedirect is constructed but never
            # raised, so no redirect actually happens — confirm whether a
            # `raise` was intended here.
            cherrypy.HTTPRedirect(cherrypy.url(), 302)
            return ''
    def trans(self, newformat, *path, **params):
        ''' Transcodes the track given as ``path`` into ``newformat``.

            Streams the response of the corresponding
            ``audiotranscode.AudioTranscode().transcodeStream()`` call.

            params:
                bitrate: int for kbps. None or < 1 for default
        '''
        if not self.isAuthorized():
            raise cherrypy.HTTPRedirect(self.getBaseUrl(), 302)
        # Release the session lock so the (long) streaming response does not
        # block other requests of the same session.
        cherrypy.session.release_lock()
        if cherry.config['media.transcode'] and path:
            # bitrate
            bitrate = params.pop('bitrate', None) or None  # catch empty strings
            if bitrate:
                try:
                    bitrate = max(0, int(bitrate)) or None  # None if < 1
                except (TypeError, ValueError):
                    raise cherrypy.HTTPError(400, "Bad query: "
                        "bitrate ({0!r}) must be an integer".format(str(bitrate)))
            # path
            path = os.path.sep.join(path)
            if sys.version_info < (3, 0):  # workaround for #327 (cherrypy issue)
                path = path.decode('utf-8')  # make it work with non-ascii
            else:
                path = codecs.decode(codecs.encode(path, 'latin1'), 'utf-8')
            # NOTE(review): `path` is built from URL segments and joined into
            # basedir without a containment check — verify cherrypy rejects
            # '..' segments before this point.
            fullpath = os.path.join(cherry.config['media.basedir'], path)
            starttime = int(params.pop('starttime', 0))
            transcoder = audiotranscode.AudioTranscode()
            mimetype = audiotranscode.mime_type(newformat)
            cherrypy.response.headers["Content-Type"] = mimetype
            try:
                return transcoder.transcode_stream(fullpath, newformat,
                    bitrate=bitrate, starttime=starttime)
            except (audiotranscode.TranscodeError, IOError) as e:
                raise cherrypy.HTTPError(404, e.value)
    trans.exposed = True
    trans._cp_config = {'response.stream': True}
    def api(self, *args, **kwargs):
        """calls the appropriate handler from the handlers
        dict, if available. handlers having noauth set to
        true do not need authentification to work.
        """
        #check action
        action = args[0] if args else ''
        if not action in self.handlers:
            return "Error: no such action. '%s'" % action
        #authorize if not explicitly deactivated
        handler = self.handlers[action]
        needsAuth = not ('noauth' in dir(handler) and handler.noauth)
        if needsAuth and not self.isAuthorized():
            raise cherrypy.HTTPError(401, 'Unauthorized')
        # Handler arguments arrive JSON-encoded in the 'data' parameter.
        handler_args = {}
        if 'data' in kwargs:
            handler_args = json.loads(kwargs['data'])
        # Binary handlers return raw bytes; others are wrapped in JSON.
        is_binary = ('binary' in dir(handler) and handler.binary)
        if is_binary:
            return handler(**handler_args)
        else:
            return json.dumps({'data': handler(**handler_args)})
    api.exposed = True
    def download_check_files(self, filelist):
        """Validate a download request; returns 'ok', 'not_permitted',
        'invalid_file', 'too_big' or an OS error string."""
        # only admins and allowed users may download
        if not cherrypy.session['admin']:
            uo = self.useroptions.forUser(self.getUserId())
            if not uo.getOptionValue('media.may_download'):
                return 'not_permitted'
        # make sure nobody tries to escape from basedir
        for f in filelist:
            # don't allow to traverse up in the file system
            if '/../' in f or f.startswith('../'):
                return 'invalid_file'
            # CVE-2015-8309: do not allow absolute file paths
            if os.path.isabs(f):
                return 'invalid_file'
        # make sure all files are smaller than maximum download size
        size_limit = cherry.config['media.maximum_download_size']
        try:
            if self.model.file_size_within_limit(filelist, size_limit):
                return 'ok'
            else:
                return 'too_big'
        except OSError as e:  # use OSError for python2 compatibility
            return str(e)
    def api_downloadcheck(self, filelist):
        """Map download_check_files() status codes to user-facing messages
        ('ok' passes through unchanged)."""
        status = self.download_check_files(filelist)
        if status == 'not_permitted':
            return """You are not allowed to download files."""
        elif status == 'invalid_file':
            return "Error: invalid filename found in {list}".format(list=filelist)
        elif status == 'too_big':
            size_limit = cherry.config['media.maximum_download_size']
            return """Can't download: Playlist is bigger than {maxsize} mB.
The server administrator can change this configuration.
""".format(maxsize=size_limit/1024/1024)
        elif status == 'ok':
            return status
        else:
            # Unexpected status (e.g. an OS error string): log and return it.
            message = "Error status check for download: {status!r}".format(status=status)
            log.e(message)
            return message
    def download(self, value):
        """Stream the requested files (JSON-encoded list in *value*) as a
        zip archive after access/size validation."""
        if not self.isAuthorized():
            raise cherrypy.HTTPError(401, 'Unauthorized')
        filelist = [filepath for filepath in json.loads(unquote(value))]
        dlstatus = self.download_check_files(filelist)
        if dlstatus == 'ok':
            # Release the session so the long streaming response does not
            # block other requests of this session.
            _save_and_release_session()
            zipmime = 'application/x-zip-compressed'
            cherrypy.response.headers["Content-Type"] = zipmime
            zipname = 'attachment; filename="music.zip"'
            cherrypy.response.headers['Content-Disposition'] = zipname
            basedir = cherry.config['media.basedir']
            fullpath_filelist = [os.path.join(basedir, f) for f in filelist]
            return zipstream.ZipStream(fullpath_filelist)
        else:
            return dlstatus
    download.exposed = True
    download._cp_config = {'response.stream': True}
    def api_getuseroptions(self):
        """Return the current user's changeable options; admins always get
        may_download=True."""
        uo = self.useroptions.forUser(self.getUserId())
        uco = uo.getChangableOptions()
        if cherrypy.session['admin']:
            uco['media'].update({'may_download': True})
        else:
            uco['media'].update({'may_download': uo.getOptionValue('media.may_download')})
        return uco
def api_heartbeat(self):
uo = self.useroptions.forUser(self.getUserId())
uo.setOption('last_time_online', int(time.time()))
def api_setuseroption(self, optionkey, optionval):
uo = self.useroptions.forUser(self.getUserId())
uo.setOption(optionkey, optionval)
return "success"
def api_setuseroptionfor(self, userid, optionkey, optionval):
if cherrypy.session['admin']:
uo = self.useroptions.forUser(userid)
uo.setOption(optionkey, optionval)
return "success"
else:
return "error: not permitted. Only admins can change other users options"
    def api_fetchalbumarturls(self, searchterm):
        """Admin-only: search online for album art and return image URLs."""
        if not cherrypy.session['admin']:
            raise cherrypy.HTTPError(401, 'Unauthorized')
        _save_and_release_session()
        fetcher = albumartfetcher.AlbumArtFetcher()
        imgurls = fetcher.fetchurls(searchterm)
        # show no more than 10 images
        return imgurls[:min(len(imgurls), 10)]
    def api_albumart_set(self, directory, imageurl):
        """Admin-only: download *imageurl* and cache it as the album art
        for *directory*."""
        if not cherrypy.session['admin']:
            raise cherrypy.HTTPError(401, 'Unauthorized')
        b64imgpath = albumArtFilePath(directory)
        fetcher = albumartfetcher.AlbumArtFetcher()
        data, header = fetcher.retrieveData(imageurl)
        self.albumartcache_save(b64imgpath, data)
    def api_fetchalbumart(self, directory):
        """Return album art bytes for *directory*, trying in order: embedded
        ID3 image, local cache, local folder images, online fetch; redirects
        to a default image when nothing is found."""
        _save_and_release_session()
        default_folder_image = "../res/img/folder.png"
        log.i('Fetching album art for: %s' % directory)
        filepath = os.path.join(cherry.config['media.basedir'], directory)
        if os.path.isfile(filepath):
            # if the given path is a file, try to get the image from ID3
            tag = TinyTag.get(filepath, image=True)
            image_data = tag.get_image()
            if image_data:
                log.d('Image found in tag.')
                header = {'Content-Type': 'image/jpg', 'Content-Length': len(image_data)}
                cherrypy.response.headers.update(header)
                return image_data
            else:
                # if the file does not contain an image, display the image of the
                # parent directory
                directory = os.path.dirname(directory)
        #try getting a cached album art image
        b64imgpath = albumArtFilePath(directory)
        img_data = self.albumartcache_load(b64imgpath)
        if img_data:
            cherrypy.response.headers["Content-Length"] = len(img_data)
            return img_data
        #try getting album art inside local folder
        fetcher = albumartfetcher.AlbumArtFetcher()
        localpath = os.path.join(cherry.config['media.basedir'], directory)
        header, data, resized = fetcher.fetchLocal(localpath)
        if header:
            if resized:
                #cache resized image for next time
                self.albumartcache_save(b64imgpath, data)
            cherrypy.response.headers.update(header)
            return data
        elif cherry.config['media.fetch_album_art']:
            #fetch album art from online source
            try:
                foldername = os.path.basename(directory)
                keywords = foldername
                log.i(_("Fetching album art for keywords {keywords!r}").format(keywords=keywords))
                header, data = fetcher.fetch(keywords)
                if header:
                    cherrypy.response.headers.update(header)
                    self.albumartcache_save(b64imgpath, data)
                    return data
                else:
                    # albumart fetcher failed, so we serve a standard image
                    raise cherrypy.HTTPRedirect(default_folder_image, 302)
            # NOTE(review): this bare except also catches the HTTPRedirect
            # raised just above; the net effect (redirect) is the same, but
            # narrowing the exception type would make the intent explicit.
            except:
                # albumart fetcher threw exception, so we serve a standard image
                raise cherrypy.HTTPRedirect(default_folder_image, 302)
        else:
            # no local album art found, online fetching deactivated, show default
            raise cherrypy.HTTPRedirect(default_folder_image, 302)
    api_fetchalbumart.noauth = True
    api_fetchalbumart.binary = True
def albumartcache_load(self, imgb64path):
if os.path.exists(imgb64path):
with open(imgb64path, 'rb') as f:
return f.read()
def albumartcache_save(self, path, data):
with open(path, 'wb') as f:
f.write(data)
    def api_compactlistdir(self, directory, filterstr=None):
        """List *directory* (optionally filtered) as a list of entry dicts;
        responds 400 on an invalid path."""
        try:
            files_to_list = self.model.listdir(directory, filterstr)
        except ValueError:
            raise cherrypy.HTTPError(400, 'Bad Request')
        return [entry.to_dict() for entry in files_to_list]
    def api_listdir(self, directory):
        """List *directory* as a list of entry dicts; 400 on invalid path."""
        try:
            return [entry.to_dict() for entry in self.model.listdir(directory)]
        except ValueError:
            raise cherrypy.HTTPError(400, 'Bad Request')
    def api_search(self, searchstring):
        """Search the media library; returns a list of entry dicts, or the
        literal string '[]' for a blank query."""
        if not searchstring.strip():
            jsonresults = '[]'
        else:
            with Performance(_('processing whole search request')):
                searchresults = self.model.search(searchstring.strip())
                with Performance(_('rendering search results as json')):
                    jsonresults = [entry.to_dict() for entry in searchresults]
        return jsonresults
def api_rememberplaylist(self, playlist):
    """Stash the client's current playlist in the server-side session."""
    cherrypy.session['playlist'] = playlist
def api_saveplaylist(self, playlist, public, playlistname, overwrite=False):
    """Persist *playlist* under *playlistname* for the current user.

    Returns "success", or raises a 400 carrying the backend's error text.
    """
    result = self.playlistdb.savePlaylist(
        userid=self.getUserId(),
        public=1 if public else 0,
        playlist=playlist,
        playlisttitle=playlistname,
        overwrite=overwrite)
    if result != "success":
        raise cherrypy.HTTPError(400, result)
    return result
def api_deleteplaylist(self, playlistid):
    """Delete *playlistid* if the current user owns it."""
    result = self.playlistdb.deletePlaylist(playlistid,
                                            self.getUserId(),
                                            override_owner=False)
    if result == "success":
        return result
    # Not the ideal status code, but the backend only reports a free-form
    # reason string, so we cannot distinguish 403/404 here.
    raise cherrypy.HTTPError(400, result)
def api_loadplaylist(self, playlistid):
    """Return the entries of *playlistid* as dicts (empty if not visible)."""
    entries = self.playlistdb.loadPlaylist(playlistid=playlistid,
                                           userid=self.getUserId())
    return [entry.to_dict() for entry in entries]
def api_generaterandomplaylist(self):
    """Return 50 random tracks from the library as dicts."""
    entries = self.model.randomMusicEntries(50)
    return [entry.to_dict() for entry in entries]
def api_changeplaylist(self, plid, attribute, value):
    """Change a playlist attribute; only 'public' is supported.

    Returns the backend result, or None for unknown attributes or
    ill-typed input.
    """
    if attribute != 'public':
        return None
    # strict type checks: value must be a real bool, plid a real int
    if type(value) == bool and type(plid) == int:
        return self.playlistdb.setPublic(userid=self.getUserId(),
                                         plid=plid,
                                         public=value)
    return None
def api_getmotd(self):
    """Return the 'message of the day' payload for the client.

    For admins with update notification enabled, checks for new releases
    and returns an 'update' payload with the newest version and collated
    feature/fix notes; otherwise returns a random 'wisdom' message.
    """
    if cherrypy.session['admin'] and cherry.config['general.update_notification']:
        # release the session lock first: the update check may block on
        # network I/O and would otherwise stall concurrent requests
        _save_and_release_session()
        new_versions = self.model.check_for_updates()
        if new_versions:
            newest_version = new_versions[0]['version']
            features = []
            fixes = []
            for version in new_versions:
                # changelog entries are tagged 'FEATURE:' / 'FIX:' / 'FIXED:'
                for update in version['features']:
                    if update.startswith('FEATURE:'):
                        features.append(update[len('FEATURE:'):])
                    elif update.startswith('FIX:'):
                        fixes.append(update[len('FIX:'):])
                    elif update.startswith('FIXED:'):
                        fixes.append(update[len('FIXED:'):])
            retdata = {'type': 'update', 'data': {}}
            retdata['data']['version'] = newest_version
            retdata['data']['features'] = features
            retdata['data']['fixes'] = fixes
            return retdata
    return {'type': 'wisdom', 'data': self.model.motd()}
def api_restoreplaylist(self):
    """Return the playlist previously stored in the session ([] default)."""
    session_playlist = cherrypy.session.get('playlist', [])
    return session_playlist
def api_getplayables(self):
    """DEPRECATED"""
    # kept for old clients; returns the configured playable formats as JSON
    return json.dumps(cherry.config['media.playable'])
def api_getuserlist(self):
    """Return all accounts plus the server time (admin-only).

    Non-admins receive an empty list with time 0.  NOTE(review): this
    returns a JSON *string* while most api_* siblings return plain data
    -- presumably handled upstream; confirm before changing.
    """
    if cherrypy.session['admin']:
        userlist = self.userdb.getUserList()
        for user in userlist:
            if user['id'] == cherrypy.session['userid']:
                # an admin must never delete their own account
                user['deletable'] = False
            user_options = self.useroptions.forUser(user['id'])
            t = user_options.getOptionValue('last_time_online')
            may_download = user_options.getOptionValue('media.may_download')
            user['last_time_online'] = t
            user['may_download'] = may_download
        # most recently seen users first
        sortfunc = lambda user: user['last_time_online']
        userlist = sorted(userlist, key=sortfunc, reverse=True)
        return json.dumps({'time': int(time.time()),
                           'userlist': userlist})
    else:
        return json.dumps({'time': 0, 'userlist': []})
def api_adduser(self, username, password, isadmin):
    """Create a new account (admin-only).

    Returns a human-readable status string; non-admins get a mocking
    refusal instead of an error status.
    """
    if not cherrypy.session['admin']:
        return "You didn't think that would work, did you?"
    if self.userdb.addUser(username, password, isadmin):
        return 'added new user: %s' % username
    # BUG FIX: the original applied '%' to a format string without any
    # placeholder ('error, cannot add new user!' % username), which raises
    # TypeError at runtime instead of reporting the failure.
    return 'error, cannot add new user: %s' % username
def api_userchangepassword(self, oldpassword, newpassword, username=''):
    """Change a password after re-authenticating with the old one.

    Without *username* the session user changes their own password;
    admins may pass another username.  NOTE(review): in the admin case
    *oldpassword* is checked against the *target* user's password, not
    the admin's -- confirm that is intended.
    """
    isself = username == ''
    if isself:
        username = cherrypy.session['username']
    # always verify the old password before touching anything
    authed_user = self.userdb.auth(username, oldpassword)
    is_authenticated = userdb.User.nobody() != authed_user
    if not is_authenticated:
        raise cherrypy.HTTPError(403, "Forbidden")
    if isself or cherrypy.session['admin']:
        return self.userdb.changePassword(username, newpassword)
    else:
        raise cherrypy.HTTPError(403, "Forbidden")
def api_userdelete(self, userid):
    """Delete *userid*; admins only, and never the admin's own account."""
    is_self = cherrypy.session['userid'] == userid
    if is_self or not cherrypy.session['admin']:
        return "You didn't think that would work, did you?"
    deleted = self.userdb.deleteUser(userid)
    return 'success' if deleted else 'failed'
def api_showplaylists(self, sortby="created", filterby=''):
    """List playlists visible to the current user, sorted and filtered.

    A leading '-' on *sortby* reverses the order; unknown keys fall back
    to 'created', and 'default' means newest first.
    """
    playlists = self.playlistdb.showPlaylists(self.getUserId(), filterby)
    now = int(time.time())
    descending = False
    # enrich each row with the owner's name, a type tag and age in seconds
    for playlist in playlists:
        playlist['username'] = self.userdb.getNameById(playlist['userid'])
        playlist['type'] = 'playlist'
        playlist['age'] = now - playlist['created']
    if sortby[0] == '-':
        descending = True
        sortby = sortby[1:]
    if sortby not in ('username', 'age', 'title', 'default'):
        sortby = 'created'
    if sortby == 'default':
        sortby = 'age'
        descending = False
    return sorted(playlists, key=lambda row: row[sortby], reverse=descending)
def api_logout(self):
    """End the current session; reachable without authentication."""
    cherrypy.lib.sessions.expire()
api_logout.no_auth = True
def api_downloadpls(self, plid, hostaddr):
    """Serve playlist *plid* as a downloadable .pls file.

    Returns None (implicitly) when the playlist cannot be rendered.
    """
    userid = self.getUserId()
    playlist_file = self.playlistdb.createPLS(plid=plid, userid=userid,
                                              addrstr=hostaddr)
    title = self.playlistdb.getName(plid, userid)
    if playlist_file and title:
        return self.serve_string_as_file(playlist_file, title + '.pls')
api_downloadpls.binary = True
def api_downloadm3u(self, plid, hostaddr):
    """Serve playlist *plid* as a downloadable .m3u file.

    Returns None (implicitly) when the playlist cannot be rendered.
    """
    userid = self.getUserId()
    playlist_file = self.playlistdb.createM3U(plid=plid, userid=userid,
                                              addrstr=hostaddr)
    title = self.playlistdb.getName(plid, userid)
    if playlist_file and title:
        return self.serve_string_as_file(playlist_file, title + '.m3u')
api_downloadm3u.binary = True
def export_playlists(self, format, all=False, hostaddr=''):
    """Serve a zip archive of the user's playlists as .m3u or .pls files.

    :param format: 'm3u' or 'pls' (case-insensitive)
    :param all: include public playlists of other users as well
    :param hostaddr: base address embedded into the generated track URLs
    :raises cherrypy.HTTPError: 401 not logged in, 400 bad format,
        404 no playlists
    """
    userid = self.getUserId()
    if not userid:
        raise cherrypy.HTTPError(401, _("Please log in"))
    # normalize to '<host><rootpath>' without a trailing slash
    hostaddr = (hostaddr.strip().rstrip('/') + cherry.config['server.rootpath']).rstrip('/')
    format = format.lower()
    if format == 'm3u':
        filemaker = self.playlistdb.createM3U
    elif format == 'pls':
        filemaker = self.playlistdb.createPLS
    else:
        raise cherrypy.HTTPError(400,
            _('Unknown playlist format: {format!r}').format(format=format))
    playlists = self.playlistdb.showPlaylists(userid, include_public=all)
    if not playlists:
        raise cherrypy.HTTPError(404, _('No playlists found'))
    with MemoryZipFile() as zip:
        for pl in playlists:
            plid = pl['plid']
            plstr = filemaker(plid=plid, userid=userid, addrstr=hostaddr)
            name = self.playlistdb.getName(plid, userid) + '.' + format
            if not pl['owner']:
                # foreign playlists go into a subfolder named after the owner
                username = self.userdb.getNameById(pl['userid'])
                name = username + '/' + name
            zip.writestr(name, plstr)
    zipmime = 'application/x-zip-compressed'
    zipname = 'attachment; filename="playlists.zip"'
    cherrypy.response.headers["Content-Type"] = zipmime
    cherrypy.response.headers['Content-Disposition'] = zipname
    return zip.getbytes()
export_playlists.exposed = True
def api_getsonginfo(self, path):
    """Return metadata for the track at *path* (relative to basedir) as JSON."""
    basedir = cherry.config['media.basedir']
    # NOTE(review): *path* is joined unchecked; presumably validated
    # upstream, otherwise '..' segments could escape basedir -- confirm.
    abspath = os.path.join(basedir, path)
    return json.dumps(metainfo.getSongInfo(abspath).dict())
def api_getencoders(self):
    """Return the available transcoder output formats as JSON."""
    return json.dumps(audiotranscode.getEncoders())
def api_getdecoders(self):
    """Return the available transcoder input formats as JSON."""
    return json.dumps(audiotranscode.getDecoders())
def api_transcodingenabled(self):
    """Return whether server-side transcoding is enabled, as JSON."""
    return json.dumps(cherry.config['media.transcode'])
def api_updatedb(self):
    """Trigger a media-library rescan; returns 'success'."""
    self.model.updateLibrary()
    return 'success'
def api_getconfiguration(self):
    """Return client-relevant server configuration as a dict."""
    clientconfigkeys = {
        'transcodingenabled': cherry.config['media.transcode'],
        'fetchalbumart': cherry.config['media.fetch_album_art'],
        'isadmin': cherrypy.session['admin'],
        'username': cherrypy.session['username'],
        'servepath': 'serve/',
        'transcodepath': 'trans/',
        'auto_login': self.autoLoginActive(),
        'version': cherry.REPO_VERSION or cherry.VERSION,
    }
    if cherry.config['media.transcode']:
        decoders = list(self.model.transcoder.available_decoder_formats())
        clientconfigkeys['getdecoders'] = decoders
        encoders = list(self.model.transcoder.available_encoder_formats())
        clientconfigkeys['getencoders'] = encoders
    else:
        # without transcoding the client gets empty codec lists
        clientconfigkeys['getdecoders'] = []
        clientconfigkeys['getencoders'] = []
    return clientconfigkeys
def serve_string_as_file(self, string, filename):
    """Send *string* as a UTF-8 file download named *filename*."""
    # SECURITY FIX: *filename* can derive from user-controlled playlist
    # names; strip CR/LF and double quotes so a crafted name cannot
    # inject extra response headers or break out of the quoted filename.
    safe_name = filename.replace('\r', '').replace('\n', '').replace('"', '')
    content_disposition = 'attachment; filename="' + safe_name + '"'
    cherrypy.response.headers["Content-Type"] = "application/x-download"
    cherrypy.response.headers["Content-Disposition"] = content_disposition
    return codecs.encode(string, "UTF-8")
def _save_and_release_session():
    """ workaround to cleanly release FileSessions in Cherrypy >= 3.3
    From https://github.com/devsnd/cherrymusic/issues/483:
    > CherryPy >=3.3.0 (up to current version, 3.6) makes it impossible to
    > explicitly release FileSession locks, because:
    > 1. FileSession.save() asserts that the session is locked; and
    > 2. _cptools.SessionTool always adds a hook to call sessions.save
    > before the response is finalized.
    > If we still want to release the session in a controller, I guess the
    > best way to work around this is to remove the hook before the
    > controller returns:
    """
    cherrypy.session.save()
    # drop every pending before_finalize hook that would call sessions.save
    # a second time (saving twice trips the "session is locked" assertion)
    hooks = cherrypy.serving.request.hooks['before_finalize']
    forbidden = cherrypy.lib.sessions.save
    hooks[:] = [h for h in hooks if h.callback is not forbidden]
    # there's likely only one hook, since a 2nd call to save would always fail;
    # but let's be safe, and block all calls to save :)
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_1777_0 |
crossvul-python_data_bad_3680_1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Copyright 2011, Piston Cloud Computing, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
Includes injection of SSH PGP keys into authorized_keys file.
"""
import crypt
import json
import os
import random
import re
import tempfile
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.virt.disk import guestfs
from nova.virt.disk import loop
from nova.virt.disk import nbd
LOG = logging.getLogger(__name__)
disk_opts = [
cfg.StrOpt('injected_network_template',
default='$pybasedir/nova/virt/interfaces.template',
help='Template file for injected network'),
cfg.ListOpt('img_handlers',
default=['loop', 'nbd', 'guestfs'],
help='Order of methods used to mount disk images'),
# NOTE(yamahata): ListOpt won't work because the command may include a
# comma. For example:
#
# mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16
# --label %(fs_label)s %(target)s
#
# list arguments are comma separated and there is no way to
# escape such commas.
#
cfg.MultiStrOpt('virt_mkfs',
default=[
'default=mkfs.ext3 -L %(fs_label)s -F %(target)s',
'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s',
'windows=mkfs.ntfs'
' --force --fast --label %(fs_label)s %(target)s',
# NOTE(yamahata): vfat case
#'windows=mkfs.vfat -n %(fs_label)s %(target)s',
],
help='mkfs commands for ephemeral device. '
'The format is <os_type>=<mkfs command>'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(disk_opts)
# Parse the 'virt_mkfs' flag entries ('<os_type>=<mkfs command>') into a
# per-OS command table; the 'default' entry is remembered separately.
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None
for s in FLAGS.virt_mkfs:
    # NOTE(yamahata): mkfs command may includes '=' for its options.
    #                 So item.partition('=') doesn't work here
    os_type, mkfs_command = s.split('=', 1)
    if os_type:
        _MKFS_COMMAND[os_type] = mkfs_command
    if os_type == 'default':
        _DEFAULT_MKFS_COMMAND = mkfs_command
_QEMU_VIRT_SIZE_REGEX = re.compile('^virtual size: (.*) \(([0-9]+) bytes\)',
re.MULTILINE)
def mkfs(os_type, fs_label, target):
    """Format *target* using the mkfs command configured for *os_type*."""
    # NOTE(review): the command template is expanded via '% locals()' and
    # then split on whitespace, so labels containing spaces would break
    # argument boundaries -- confirm inputs are constrained upstream.
    mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or
                    '') % locals()
    if mkfs_command:
        utils.execute(*mkfs_command.split())
def get_image_virtual_size(image):
    """Return the virtual size of *image* in bytes, per ``qemu-img info``."""
    out, _err = utils.execute('qemu-img', 'info', image)
    m = _QEMU_VIRT_SIZE_REGEX.search(out)
    # NOTE(review): raises AttributeError if the output carries no
    # 'virtual size' line -- confirm callers tolerate that.
    return int(m.group(2))
def extend(image, size):
    """Increase image to size"""
    # NOTE(MotoKen): check image virtual size before resize
    virt_size = get_image_virtual_size(image)
    if virt_size >= size:
        # never shrink an image
        return
    utils.execute('qemu-img', 'resize', image, size)
    # NOTE(vish): attempts to resize filesystem
    # best effort: exit codes ignored since non-ext images will fail here
    utils.execute('e2fsck', '-fp', image, check_exit_code=False)
    utils.execute('resize2fs', image, check_exit_code=False)
def bind(src, target, instance_name):
    """Bind device to a filesytem"""
    if src:
        utils.execute('touch', target, run_as_root=True)
        utils.execute('mount', '-o', 'bind', src, target,
                      run_as_root=True)
        s = os.stat(src)
        # grant the LXC guest read/write/mknod access to this block device
        # via the libvirt cgroup device whitelist
        cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev),
                                         os.minor(s.st_rdev))
        cgroups_path = \
            "/sys/fs/cgroup/devices/libvirt/lxc/%s/devices.allow" \
            % instance_name
        utils.execute('tee', cgroups_path,
                      process_input=cgroup_info, run_as_root=True)
def unbind(target):
    """Undo bind(): unmount the bind mount at *target* (no-op if falsy)."""
    if target:
        utils.execute('umount', target, run_as_root=True)
class _DiskImage(object):
    """Provide operations on a disk image file."""

    def __init__(self, image, partition=None, use_cow=False, mount_dir=None):
        # These passed to each mounter
        self.image = image
        self.partition = partition
        self.mount_dir = mount_dir

        # Internal
        self._mkdir = False      # True when we created mount_dir ourselves
        self._mounter = None     # the handler that successfully mounted
        self._errors = []        # diagnostics from failed mount attempts

        # As a performance tweak, don't bother trying to
        # directly loopback mount a cow image.
        self.handlers = FLAGS.img_handlers[:]
        if use_cow and 'loop' in self.handlers:
            self.handlers.remove('loop')
        if not self.handlers:
            raise exception.Error(_('no capable image handler configured'))

    @property
    def errors(self):
        """Return the collated errors from all operations."""
        return '\n--\n'.join([''] + self._errors)

    @staticmethod
    def _handler_class(mode):
        """Look up the appropriate class to use based on MODE."""
        for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
            if cls.mode == mode:
                return cls
        raise exception.Error(_("unknown disk image handler: %s") % mode)

    def mount(self):
        """Mount a disk image, using the object attributes.

        The first supported means provided by the mount classes is used.

        True, or False is returned and the 'errors' attribute
        contains any diagnostics.
        """
        if self._mounter:
            raise exception.Error(_('image already mounted'))

        if not self.mount_dir:
            self.mount_dir = tempfile.mkdtemp()
            self._mkdir = True

        try:
            # try each configured handler in order until one succeeds
            for h in self.handlers:
                mounter_cls = self._handler_class(h)
                mounter = mounter_cls(image=self.image,
                                      partition=self.partition,
                                      mount_dir=self.mount_dir)
                if mounter.do_mount():
                    self._mounter = mounter
                    break
                else:
                    LOG.debug(mounter.error)
                    self._errors.append(mounter.error)
        finally:
            if not self._mounter:
                self.umount()  # rmdir
        return bool(self._mounter)

    def umount(self):
        """Unmount a disk image from the file system."""
        try:
            if self._mounter:
                self._mounter.do_umount()
        finally:
            # only remove the mount dir if mount() created it
            if self._mkdir:
                os.rmdir(self.mount_dir)
# Public module functions
def inject_data(image,
                key=None, net=None, metadata=None, admin_password=None,
                partition=None, use_cow=False):
    """Injects a ssh key and optionally net data into a disk image.

    it will mount the image as a fully partitioned disk and attempt to inject
    into the specified partition number.

    If partition is not specified it mounts the image as a single partition.

    :raises exception.Error: when the image cannot be mounted
    """
    img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if img.mount():
        try:
            inject_data_into_fs(img.mount_dir,
                                key, net, metadata, admin_password,
                                utils.execute)
        finally:
            # always unmount, even if injection fails part-way
            img.umount()
    else:
        raise exception.Error(img.errors)
def inject_files(image, files, partition=None, use_cow=False):
    """Injects arbitrary files into a disk image

    :param files: iterable of (guest path, contents) pairs
    :raises exception.Error: when the image cannot be mounted
    """
    img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if img.mount():
        try:
            for (path, contents) in files:
                _inject_file_into_fs(img.mount_dir, path, contents)
        finally:
            img.umount()
    else:
        raise exception.Error(img.errors)
def setup_container(image, container_dir=None, use_cow=False):
    """Setup the LXC container.

    It will mount the loopback image to the container directory in order
    to create the root filesystem for the container.

    LXC does not support qcow2 images yet.
    """
    try:
        img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
        if img.mount():
            return img
        else:
            raise exception.Error(img.errors)
    except Exception, exn:
        # NOTE(review): the failure is logged but swallowed -- callers
        # receive None on error; confirm they handle that.
        LOG.exception(_('Failed to mount filesystem: %s'), exn)
def destroy_container(img):
    """Destroy the container once it terminates.

    It will umount the container that is mounted,
    and delete any linked devices.

    LXC does not support qcow2 images yet.
    """
    try:
        if img:
            img.umount()
    except Exception, exn:
        # best effort: log and continue instance teardown
        LOG.exception(_('Failed to remove container: %s'), exn)
def inject_data_into_fs(fs, key, net, metadata, admin_password, execute):
    """Injects data into a filesystem already mounted by the caller.

    Virt connections can call this directly if they mount their fs
    in a different way to inject_data

    NOTE(review): *execute* is forwarded but every helper below calls
    utils.execute directly and ignores it -- confirm before relying on it.
    """
    if key:
        _inject_key_into_fs(key, fs, execute=execute)
    if net:
        _inject_net_into_fs(net, fs, execute=execute)
    if metadata:
        _inject_metadata_into_fs(metadata, fs, execute=execute)
    if admin_password:
        _inject_admin_password_into_fs(admin_password, fs, execute=execute)
def _inject_file_into_fs(fs, path, contents):
    """Write *contents* to *path* interpreted relative to the guest root *fs*.

    SECURITY FIX (CWE-22): the original only stripped leading slashes, so a
    crafted path such as 'a/../../etc/shadow' escaped the mounted guest
    image and overwrote *host* files via the root-privileged 'tee' below.
    The target is now canonicalized and required to stay inside *fs*.
    """
    absolute_path = os.path.normpath(os.path.join(fs, path.lstrip('/')))
    # require the resolved target to remain inside the guest filesystem
    if not absolute_path.startswith(os.path.join(os.path.normpath(fs), '')):
        raise exception.Invalid(_('injected file path not valid'))
    parent_dir = os.path.dirname(absolute_path)
    utils.execute('mkdir', '-p', parent_dir, run_as_root=True)
    utils.execute('tee', absolute_path, process_input=contents,
                  run_as_root=True)
def _inject_metadata_into_fs(metadata, fs, execute=None):
    """Write instance metadata as JSON to meta.js at the root of *fs*.

    *metadata* is an iterable of objects carrying .key/.value attributes;
    the *execute* parameter is accepted but unused.
    """
    metadata_path = os.path.join(fs, "meta.js")
    metadata = dict([(m.key, m.value) for m in metadata])

    utils.execute('tee', metadata_path,
                  process_input=json.dumps(metadata), run_as_root=True)
def _inject_key_into_fs(key, fs, execute=None):
    """Add the given public ssh key to root's authorized_keys.

    key is an ssh key string.
    fs is the path to the base of the filesystem into which to inject the key.
    """
    sshdir = os.path.join(fs, 'root', '.ssh')
    # ensure ~/.ssh exists with the permissions sshd demands
    utils.execute('mkdir', '-p', sshdir, run_as_root=True)
    utils.execute('chown', 'root', sshdir, run_as_root=True)
    utils.execute('chmod', '700', sshdir, run_as_root=True)
    keyfile = os.path.join(sshdir, 'authorized_keys')
    # append ('-a') with a marker comment so operators can see its origin
    key_data = [
        '\n',
        '# The following ssh key was injected by Nova',
        '\n',
        key.strip(),
        '\n',
    ]
    utils.execute('tee', '-a', keyfile,
                  process_input=''.join(key_data), run_as_root=True)
def _inject_net_into_fs(net, fs, execute=None):
    """Inject /etc/network/interfaces into the filesystem rooted at fs.

    net is the contents of /etc/network/interfaces.
    """
    netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
    utils.execute('mkdir', '-p', netdir, run_as_root=True)
    utils.execute('chown', 'root:root', netdir, run_as_root=True)
    # NOTE(review): 755 is passed as an int, not the string '755' --
    # presumably utils.execute stringifies arguments; confirm.
    utils.execute('chmod', 755, netdir, run_as_root=True)
    netfile = os.path.join(netdir, 'interfaces')
    utils.execute('tee', netfile, process_input=net, run_as_root=True)
def _inject_admin_password_into_fs(admin_passwd, fs, execute=None):
    """Set the root password to admin_passwd

    admin_password is a root password
    fs is the path to the base of the filesystem into which to inject
    the key.

    This method modifies the instance filesystem directly,
    and does not require a guest agent running in the instance.
    """
    # The approach used here is to copy the password and shadow
    # files from the instance filesystem to local files, make any
    # necessary changes, and then copy them back.

    admin_user = 'root'

    # mkstemp creates the files 0600; only the fd is needed for the name
    fd, tmp_passwd = tempfile.mkstemp()
    os.close(fd)
    fd, tmp_shadow = tempfile.mkstemp()
    os.close(fd)

    # NOTE(review): the temp files are not unlinked if _set_passwd or a
    # copy raises, leaving password material behind -- consider a
    # try/finally cleanup.
    utils.execute('cp', os.path.join(fs, 'etc', 'passwd'), tmp_passwd,
                  run_as_root=True)
    utils.execute('cp', os.path.join(fs, 'etc', 'shadow'), tmp_shadow,
                  run_as_root=True)
    _set_passwd(admin_user, admin_passwd, tmp_passwd, tmp_shadow)
    utils.execute('cp', tmp_passwd, os.path.join(fs, 'etc', 'passwd'),
                  run_as_root=True)
    os.unlink(tmp_passwd)
    utils.execute('cp', tmp_shadow, os.path.join(fs, 'etc', 'shadow'),
                  run_as_root=True)
    os.unlink(tmp_shadow)
def _set_passwd(username, admin_passwd, passwd_file, shadow_file):
    """set the password for username to admin_passwd

    The passwd_file is not modified.  The shadow_file is updated.
    if the username is not found in both files, an exception is raised.

    :param username: the username
    :param admin_passwd: the plaintext password to set
    :param passwd_file: path to the passwd file
    :param shadow_file: path to the shadow password file
    :returns: nothing
    :raises: exception.Error(), IOError()
    """
    salt_set = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789./')
    # encryption algo - id pairs for crypt()
    algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''}

    # NOTE(review): ``random`` is not a CSPRNG; an os.urandom-based salt
    # would be preferable for password hashing.
    salt = ''.join([random.choice(salt_set) for c in range(16)])

    # crypt() depends on the underlying libc, and may not support all
    # forms of hash. We try md5 first. If we get only 13 characters back,
    # then the underlying crypt() didn't understand the '$n$salt' magic,
    # so we fall back to DES.
    # md5 is the default because it's widely supported. Although the
    # local crypt() might support stronger SHA, the target instance
    # might not.
    encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt)
    if len(encrypted_passwd) == 13:
        encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt)

    # username MUST exist in passwd file or it's an error.
    # BUG FIX: the messages use '%(username)s', so '%' must be applied to
    # a mapping; the original passed the bare string and raised TypeError.
    p_file = open(passwd_file, 'rb')
    try:
        for entry in p_file:
            if entry.split(':')[0] == username:
                break
        else:
            msg = _('User %(username)s not found in password file.')
            raise exception.Error(msg % {'username': username})
    finally:
        # BUG FIX: the original closed both files in one finally, raising
        # NameError for s_file when the shadow open itself had failed.
        p_file.close()

    # update password in the shadow file. It's an error if the
    # user doesn't exist.
    new_shadow = list()
    found = False
    s_file = open(shadow_file, 'rb')
    try:
        for entry in s_file:
            split_entry = entry.split(':')
            if split_entry[0] == username:
                split_entry[1] = encrypted_passwd
                found = True
            new_shadow.append(':'.join(split_entry))
    finally:
        s_file.close()
    if not found:
        msg = _('User %(username)s not found in shadow file.')
        raise exception.Error(msg % {'username': username})

    s_file = open(shadow_file, 'wb')
    try:
        for entry in new_shadow:
            s_file.write(entry)
    finally:
        s_file.close()
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_3680_1 |
crossvul-python_data_bad_4110_1 | """ Generate modern Python clients from OpenAPI """
from __future__ import annotations
import shutil
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict, Optional, Sequence, Union
import httpcore
import httpx
import yaml
from jinja2 import Environment, PackageLoader
from openapi_python_client import utils
from .parser import GeneratorData, import_string_from_reference
from .parser.errors import GeneratorError
if sys.version_info.minor == 7: # version did not exist in 3.7, need to use a backport
from importlib_metadata import version
else:
from importlib.metadata import version # type: ignore
__version__ = version(__package__)
def _get_project_for_url_or_path(url: Optional[str], path: Optional[Path]) -> Union[Project, GeneratorError]:
    """Load an OpenAPI document from *url* or *path* and wrap it in a Project.

    Failures are reported as GeneratorError return values, not exceptions.
    """
    data_dict = _get_document(url=url, path=path)
    if isinstance(data_dict, GeneratorError):
        return data_dict
    openapi = GeneratorData.from_dict(data_dict)
    if isinstance(openapi, GeneratorError):
        return openapi
    return Project(openapi=openapi)
def create_new_client(*, url: Optional[str], path: Optional[Path]) -> Sequence[GeneratorError]:
    """
    Generate the client library

    Returns:
        A list containing any errors encountered when generating.
    """
    result = _get_project_for_url_or_path(url=url, path=path)
    if isinstance(result, GeneratorError):
        return [result]
    return result.build()
def update_existing_client(*, url: Optional[str], path: Optional[Path]) -> Sequence[GeneratorError]:
    """
    Update an existing client library

    Returns:
        A list containing any errors encountered when generating.
    """
    result = _get_project_for_url_or_path(url=url, path=path)
    if isinstance(result, GeneratorError):
        return [result]
    return result.update()
def _get_document(*, url: Optional[str], path: Optional[Path]) -> Union[Dict[str, Any], GeneratorError]:
    """Fetch and YAML-parse the OpenAPI document from exactly one source.

    Exactly one of *url*/*path* must be provided; every failure mode is
    reported as a GeneratorError value instead of an exception.
    """
    yaml_bytes: bytes
    if url is not None and path is not None:
        return GeneratorError(header="Provide URL or Path, not both.")
    if url is not None:
        try:
            response = httpx.get(url)
            yaml_bytes = response.content
        except (httpx.HTTPError, httpcore.NetworkError):
            return GeneratorError(header="Could not get OpenAPI document from provided URL")
    elif path is not None:
        yaml_bytes = path.read_bytes()
    else:
        return GeneratorError(header="No URL or Path provided")
    try:
        # safe_load: never construct arbitrary Python objects from the doc
        return yaml.safe_load(yaml_bytes)
    except yaml.YAMLError:
        return GeneratorError(header="Invalid YAML from provided source")
class Project:
    """Renders a parsed OpenAPI document into a complete Python package
    on disk (pyproject, README, models, sync/async endpoint modules)."""

    TEMPLATE_FILTERS = {"snakecase": utils.snake_case, "spinalcase": utils.spinal_case}
    # class-level hooks that subclasses/tests may override before __init__
    project_name_override: Optional[str] = None
    package_name_override: Optional[str] = None

    def __init__(self, *, openapi: GeneratorData) -> None:
        self.openapi: GeneratorData = openapi
        self.env: Environment = Environment(loader=PackageLoader(__package__), trim_blocks=True, lstrip_blocks=True)

        # project/package names are derived from the (untrusted) API title
        self.project_name: str = self.project_name_override or f"{openapi.title.replace(' ', '-').lower()}-client"
        self.project_dir: Path = Path.cwd() / self.project_name

        self.package_name: str = self.package_name_override or self.project_name.replace("-", "_")
        self.package_dir: Path = self.project_dir / self.package_name
        self.package_description: str = f"A client library for accessing {self.openapi.title}"
        self.version: str = openapi.version

        self.env.filters.update(self.TEMPLATE_FILTERS)

    def build(self) -> Sequence[GeneratorError]:
        """ Create the project from templates """
        print(f"Generating {self.project_name}")
        try:
            self.project_dir.mkdir()
        except FileExistsError:
            return [GeneratorError(detail="Directory already exists. Delete it or use the update command.")]
        self._create_package()
        self._build_metadata()
        self._build_models()
        self._build_api()
        self._reformat()
        return self._get_errors()

    def update(self) -> Sequence[GeneratorError]:
        """ Update an existing project """
        if not self.package_dir.is_dir():
            raise FileNotFoundError()
        print(f"Updating {self.project_name}")
        # regenerate the package from scratch; metadata files are kept
        shutil.rmtree(self.package_dir)
        self._create_package()
        self._build_models()
        self._build_api()
        self._reformat()
        return self._get_errors()

    def _reformat(self) -> None:
        # run isort + black over the generated tree; output is discarded
        subprocess.run(
            "isort .", cwd=self.project_dir, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        )
        subprocess.run("black .", cwd=self.project_dir, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def _get_errors(self) -> Sequence[GeneratorError]:
        """Collect parse errors from all endpoint collections and schemas."""
        errors = []
        for collection in self.openapi.endpoint_collections_by_tag.values():
            errors.extend(collection.parse_errors)
        errors.extend(self.openapi.schemas.errors)
        return errors

    def _create_package(self) -> None:
        """Create the package dir with __init__ and a py.typed marker."""
        self.package_dir.mkdir()
        # Package __init__.py
        package_init = self.package_dir / "__init__.py"
        package_init_template = self.env.get_template("package_init.pyi")
        package_init.write_text(package_init_template.render(description=self.package_description))

        pytyped = self.package_dir / "py.typed"
        pytyped.write_text("# Marker file for PEP 561")

    def _build_metadata(self) -> None:
        """Render pyproject.toml, README.md and .gitignore."""
        # Create a pyproject.toml file
        pyproject_template = self.env.get_template("pyproject.toml")
        pyproject_path = self.project_dir / "pyproject.toml"
        pyproject_path.write_text(
            pyproject_template.render(
                project_name=self.project_name,
                package_name=self.package_name,
                version=self.version,
                description=self.package_description,
            )
        )

        # README.md
        readme = self.project_dir / "README.md"
        readme_template = self.env.get_template("README.md")
        readme.write_text(
            readme_template.render(
                project_name=self.project_name, description=self.package_description, package_name=self.package_name
            )
        )

        # .gitignore
        git_ignore_path = self.project_dir / ".gitignore"
        git_ignore_template = self.env.get_template(".gitignore")
        git_ignore_path.write_text(git_ignore_template.render())

    def _build_models(self) -> None:
        """Render one module per model/enum plus the models __init__."""
        # Generate models
        models_dir = self.package_dir / "models"
        models_dir.mkdir()
        models_init = models_dir / "__init__.py"
        imports = []

        types_template = self.env.get_template("types.py")
        types_path = models_dir / "types.py"
        types_path.write_text(types_template.render())

        # NOTE(review): module_name derives from the document's schema
        # names and is used directly in a filesystem path -- confirm it is
        # sanitized upstream against separators/'..' (path traversal).
        model_template = self.env.get_template("model.pyi")
        for model in self.openapi.schemas.models.values():
            module_path = models_dir / f"{model.reference.module_name}.py"
            module_path.write_text(model_template.render(model=model))
            imports.append(import_string_from_reference(model.reference))

        # Generate enums
        enum_template = self.env.get_template("enum.pyi")
        for enum in self.openapi.enums.values():
            module_path = models_dir / f"{enum.reference.module_name}.py"
            module_path.write_text(enum_template.render(enum=enum))
            imports.append(import_string_from_reference(enum.reference))

        models_init_template = self.env.get_template("models_init.pyi")
        models_init.write_text(models_init_template.render(imports=imports))

    def _build_api(self) -> None:
        """Render the client plus sync and async endpoint modules per tag."""
        # Generate Client
        client_path = self.package_dir / "client.py"
        client_template = self.env.get_template("client.pyi")
        client_path.write_text(client_template.render())

        # Generate endpoints
        api_dir = self.package_dir / "api"
        api_dir.mkdir()
        api_init = api_dir / "__init__.py"
        api_init.write_text('""" Contains synchronous methods for accessing the API """')

        async_api_dir = self.package_dir / "async_api"
        async_api_dir.mkdir()
        async_api_init = async_api_dir / "__init__.py"
        async_api_init.write_text('""" Contains async methods for accessing the API """')

        api_errors = self.package_dir / "errors.py"
        errors_template = self.env.get_template("errors.pyi")
        api_errors.write_text(errors_template.render())

        # NOTE(review): *tag* comes straight from the untrusted OpenAPI
        # document and is interpolated into a file path -- confirm it is
        # sanitized against '/' and '..' before release (path traversal).
        endpoint_template = self.env.get_template("endpoint_module.pyi")
        async_endpoint_template = self.env.get_template("async_endpoint_module.pyi")
        for tag, collection in self.openapi.endpoint_collections_by_tag.items():
            module_path = api_dir / f"{tag}.py"
            module_path.write_text(endpoint_template.render(collection=collection))
            async_module_path = async_api_dir / f"{tag}.py"
            async_module_path.write_text(async_endpoint_template.render(collection=collection))
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_4110_1 |
crossvul-python_data_good_3679_1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Copyright 2011, Piston Cloud Computing, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
Includes injection of SSH PGP keys into authorized_keys file.
"""
import crypt
import os
import random
import re
import tempfile
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import guestfs
from nova.virt.disk import loop
from nova.virt.disk import nbd
LOG = logging.getLogger(__name__)
disk_opts = [
cfg.StrOpt('injected_network_template',
default='$pybasedir/nova/virt/interfaces.template',
help='Template file for injected network'),
cfg.ListOpt('img_handlers',
default=['loop', 'nbd', 'guestfs'],
help='Order of methods used to mount disk images'),
# NOTE(yamahata): ListOpt won't work because the command may include a
# comma. For example:
#
# mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16
# --label %(fs_label)s %(target)s
#
# list arguments are comma separated and there is no way to
# escape such commas.
#
cfg.MultiStrOpt('virt_mkfs',
default=[
'default=mkfs.ext3 -L %(fs_label)s -F %(target)s',
'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s',
'windows=mkfs.ntfs'
' --force --fast --label %(fs_label)s %(target)s',
# NOTE(yamahata): vfat case
#'windows=mkfs.vfat -n %(fs_label)s %(target)s',
],
help='mkfs commands for ephemeral device. '
'The format is <os_type>=<mkfs command>'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(disk_opts)
# Parse the 'virt_mkfs' flag entries ('<os_type>=<mkfs command>') into a
# per-OS command table; the 'default' entry is remembered separately.
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None
for s in FLAGS.virt_mkfs:
    # NOTE(yamahata): mkfs command may includes '=' for its options.
    #                 So item.partition('=') doesn't work here
    os_type, mkfs_command = s.split('=', 1)
    if os_type:
        _MKFS_COMMAND[os_type] = mkfs_command
    if os_type == 'default':
        _DEFAULT_MKFS_COMMAND = mkfs_command
_QEMU_VIRT_SIZE_REGEX = re.compile('^virtual size: (.*) \(([0-9]+) bytes\)',
re.MULTILINE)
def mkfs(os_type, fs_label, target):
    """Create a filesystem on *target* using the command configured
    for *os_type* (falling back to the 'default' template).

    A silent no-op when no template is configured for the OS type and
    no default exists.
    """
    template = _MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or ''
    # The templates use named placeholders %(fs_label)s / %(target)s.
    command = template % {'os_type': os_type,
                          'fs_label': fs_label,
                          'target': target}
    if command:
        utils.execute(*command.split())
def get_image_virtual_size(image):
    """Return the virtual size of *image* in bytes, as reported by
    `qemu-img info`.
    """
    info_output, _err = utils.execute('qemu-img', 'info', image)
    # Group 2 of the regex is the byte count from the 'virtual size' line.
    match = _QEMU_VIRT_SIZE_REGEX.search(info_output)
    return int(match.group(2))
def resize2fs(image, check_exit_code=False):
    """Grow the ext filesystem inside *image* to fill the image.

    Runs e2fsck first (-f force, -p auto-repair) because resize2fs
    refuses to operate on an unchecked filesystem.  Exit codes are
    ignored by default since this is a best-effort step.
    """
    utils.execute('e2fsck', '-fp', image, check_exit_code=check_exit_code)
    utils.execute('resize2fs', image, check_exit_code=check_exit_code)
def extend(image, size):
    """Increase *image* to *size* bytes; no-op when the image is
    already at least that large.
    """
    # NOTE(MotoKen): check image virtual size before resize
    current = get_image_virtual_size(image)
    if current < size:
        utils.execute('qemu-img', 'resize', image, size)
        # NOTE(vish): attempt to grow the contained filesystem too
        resize2fs(image)
def bind(src, target, instance_name):
    """Bind-mount device *src* onto *target* and whitelist the device
    in the libvirt LXC cgroup for *instance_name*.

    Does nothing when *src* is falsy.
    """
    if not src:
        return
    utils.execute('touch', target, run_as_root=True)
    utils.execute('mount', '-o', 'bind', src, target,
                  run_as_root=True)
    device = os.stat(src)
    major = os.major(device.st_rdev)
    minor = os.minor(device.st_rdev)
    # 'b major:minor rwm' grants read/write/mknod on the block device.
    cgroup_info = "b %s:%s rwm\n" % (major, minor)
    cgroups_path = ("/sys/fs/cgroup/devices/libvirt/lxc/"
                    "%s/devices.allow" % instance_name)
    utils.execute('tee', cgroups_path,
                  process_input=cgroup_info, run_as_root=True)
def unbind(target):
    """Undo a bind() by unmounting *target*; no-op for a falsy target."""
    if not target:
        return
    utils.execute('umount', target, run_as_root=True)
class _DiskImage(object):
    """Provide operations on a disk image file."""

    def __init__(self, image, partition=None, use_cow=False, mount_dir=None):
        """Record mount parameters; does not touch the image yet.

        :param image: path of the disk image file
        :param partition: partition number to mount, or None for the
                          whole image
        :param use_cow: True for qcow2 images (skips the loop handler)
        :param mount_dir: where to mount; a temp dir is created if None
        """
        # These passed to each mounter
        self.image = image
        self.partition = partition
        self.mount_dir = mount_dir

        # Internal state: whether we created mount_dir ourselves, the
        # handler that succeeded, and diagnostics from failed handlers.
        self._mkdir = False
        self._mounter = None
        self._errors = []

        # As a performance tweak, don't bother trying to
        # directly loopback mount a cow image.
        self.handlers = FLAGS.img_handlers[:]
        if use_cow and 'loop' in self.handlers:
            self.handlers.remove('loop')
        if not self.handlers:
            msg = _('no capable image handler configured')
            raise exception.NovaException(msg)

    @property
    def errors(self):
        """Return the collated errors from all operations."""
        return '\n--\n'.join([''] + self._errors)

    @staticmethod
    def _handler_class(mode):
        """Look up the appropriate class to use based on MODE."""
        for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
            if cls.mode == mode:
                return cls
        msg = _("unknown disk image handler: %s") % mode
        raise exception.NovaException(msg)

    def mount(self):
        """Mount a disk image, using the object attributes.

        The first supported means provided by the mount classes is used.
        True, or False is returned and the 'errors' attribute
        contains any diagnostics.
        """
        if self._mounter:
            raise exception.NovaException(_('image already mounted'))

        if not self.mount_dir:
            self.mount_dir = tempfile.mkdtemp()
            self._mkdir = True

        try:
            # Try each configured handler in order; stop at the first
            # one that mounts successfully, collecting failures.
            for h in self.handlers:
                mounter_cls = self._handler_class(h)
                mounter = mounter_cls(image=self.image,
                                      partition=self.partition,
                                      mount_dir=self.mount_dir)
                if mounter.do_mount():
                    self._mounter = mounter
                    break
                else:
                    LOG.debug(mounter.error)
                    self._errors.append(mounter.error)
        finally:
            # If every handler failed, clean up the temp mount dir.
            if not self._mounter:
                self.umount()  # rmdir

        return bool(self._mounter)

    def umount(self):
        """Unmount a disk image from the file system."""
        try:
            if self._mounter:
                self._mounter.do_umount()
        finally:
            # Only remove mount_dir when we created it in mount().
            if self._mkdir:
                os.rmdir(self.mount_dir)
# Public module functions
def inject_data(image,
                key=None, net=None, metadata=None, admin_password=None,
                partition=None, use_cow=False):
    """Inject an ssh key and optionally net/metadata/password into *image*.

    When *partition* is given, the image is mounted as a fully
    partitioned disk and injection targets that partition number;
    otherwise the image is mounted as a single partition.

    Raises exception.NovaException with the collected mount diagnostics
    when no handler can mount the image.
    """
    img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if not img.mount():
        raise exception.NovaException(img.errors)
    try:
        inject_data_into_fs(img.mount_dir,
                            key, net, metadata, admin_password,
                            utils.execute)
    finally:
        img.umount()
def inject_files(image, files, partition=None, use_cow=False):
    """Inject arbitrary (path, contents) pairs into a disk image.

    Raises exception.NovaException when the image cannot be mounted.
    """
    img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if not img.mount():
        raise exception.NovaException(img.errors)
    try:
        for path, contents in files:
            _inject_file_into_fs(img.mount_dir, path, contents)
    finally:
        img.umount()
def setup_container(image, container_dir=None, use_cow=False):
    """Setup the LXC container.

    It will mount the loopback image to the container directory in order
    to create the root filesystem for the container.

    LXC does not support qcow2 images yet.

    Returns the mounted _DiskImage on success.
    NOTE(review): on any failure the exception is logged and swallowed,
    so the caller receives None implicitly -- callers must handle that.
    """
    try:
        img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
        if img.mount():
            return img
        else:
            raise exception.NovaException(img.errors)
    except Exception, exn:
        LOG.exception(_('Failed to mount filesystem: %s'), exn)
def destroy_container(img):
    """Destroy the container once it terminates.

    It will umount the container that is mounted, and delete any
    linked devices.  *img* is the _DiskImage returned by
    setup_container() and may be None (in which case nothing happens).

    LXC does not support qcow2 images yet.
    """
    try:
        if img:
            img.umount()
    except Exception, exn:
        # Best effort: log and continue so instance teardown proceeds.
        LOG.exception(_('Failed to remove container: %s'), exn)
def inject_data_into_fs(fs, key, net, metadata, admin_password, execute):
    """Inject data into a filesystem already mounted by the caller.

    Virt connections can call this directly if they mount their fs
    in a different way to inject_data.  Each payload is skipped when
    falsy; injection order is key, net, metadata, admin password.
    """
    injections = (
        (key, _inject_key_into_fs),
        (net, _inject_net_into_fs),
        (metadata, _inject_metadata_into_fs),
        (admin_password, _inject_admin_password_into_fs),
    )
    for payload, injector in injections:
        if payload:
            injector(payload, fs, execute=execute)
def _join_and_check_path_within_fs(fs, *args):
    """os.path.join() with safety check for injected file paths.

    Join the supplied path components and verify that the resolved
    result stays inside the mounted guest fs *fs*.  A clever path
    containing '..' (or symlink tricks resolved by realpath) that would
    escape the mount hits this safeguard.

    Raises exception.Invalid when the joined path escapes *fs*.
    """
    candidate = os.path.realpath(os.path.join(fs, *args))
    fs_root = os.path.realpath(fs) + '/'
    if not candidate.startswith(fs_root):
        raise exception.Invalid(_('injected file path not valid'))
    return candidate
def _inject_file_into_fs(fs, path, contents, append=False):
    """Write (or append) *contents* to *path* inside the guest fs *fs*.

    The leading '/' of *path* is stripped so the join stays relative to
    the mount point; the traversal safeguard rejects escaping paths.
    Parent directories are created as needed.
    """
    target = _join_and_check_path_within_fs(fs, path.lstrip('/'))
    utils.execute('mkdir', '-p', os.path.dirname(target), run_as_root=True)
    # tee -a appends; plain tee truncates.
    args = ['-a', target] if append else [target]
    kwargs = dict(process_input=contents, run_as_root=True)
    utils.execute('tee', *args, **kwargs)
def _inject_metadata_into_fs(metadata, fs, execute=None):
    """Serialize instance metadata items to /meta.js in the guest fs.

    *metadata* is an iterable of objects with .key and .value attributes.
    """
    as_dict = dict((item.key, item.value) for item in metadata)
    _inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(as_dict))
def _inject_key_into_fs(key, fs, execute=None):
    """Add the given public ssh key to root's authorized_keys.

    key is an ssh key string.
    fs is the path to the base of the filesystem into which to inject
    the key.
    """
    sshdir = _join_and_check_path_within_fs(fs, 'root', '.ssh')
    utils.execute('mkdir', '-p', sshdir, run_as_root=True)
    utils.execute('chown', 'root', sshdir, run_as_root=True)
    # sshd ignores authorized_keys unless ~/.ssh is private.
    utils.execute('chmod', '700', sshdir, run_as_root=True)

    keyfile = os.path.join('root', '.ssh', 'authorized_keys')
    key_data = ('\n'
                '# The following ssh key was injected by Nova\n'
                '%s\n' % key.strip())
    _inject_file_into_fs(fs, keyfile, key_data, append=True)
def _inject_net_into_fs(net, fs, execute=None):
    """Inject /etc/network/interfaces into the filesystem rooted at fs.

    net is the contents of /etc/network/interfaces.
    """
    netdir = _join_and_check_path_within_fs(fs, 'etc', 'network')
    utils.execute('mkdir', '-p', netdir, run_as_root=True)
    utils.execute('chown', 'root:root', netdir, run_as_root=True)
    # Pass the mode as a string for consistency with _inject_key_into_fs;
    # passing the int 755 relied on the executor stringifying arguments.
    utils.execute('chmod', '755', netdir, run_as_root=True)
    netfile = os.path.join('etc', 'network', 'interfaces')
    _inject_file_into_fs(fs, netfile, net)
def _inject_admin_password_into_fs(admin_passwd, fs, execute=None):
    """Set the root password to admin_passwd

    admin_passwd is the plaintext root password.
    fs is the path to the base of the filesystem into which to inject it.

    This method modifies the instance filesystem directly,
    and does not require a guest agent running in the instance.
    """
    # The approach used here is to copy the password and shadow
    # files from the instance filesystem to local files, make any
    # necessary changes, and then copy them back.
    admin_user = 'root'

    fd, tmp_passwd = tempfile.mkstemp()
    os.close(fd)
    fd, tmp_shadow = tempfile.mkstemp()
    os.close(fd)

    try:
        passwd_path = _join_and_check_path_within_fs(fs, 'etc', 'passwd')
        shadow_path = _join_and_check_path_within_fs(fs, 'etc', 'shadow')

        utils.execute('cp', passwd_path, tmp_passwd, run_as_root=True)
        utils.execute('cp', shadow_path, tmp_shadow, run_as_root=True)
        _set_passwd(admin_user, admin_passwd, tmp_passwd, tmp_shadow)
        utils.execute('cp', tmp_passwd, passwd_path, run_as_root=True)
        utils.execute('cp', tmp_shadow, shadow_path, run_as_root=True)
    finally:
        # Always remove the temporary copies: they contain password
        # material and previously leaked in /tmp on any failure above.
        for tmp in (tmp_passwd, tmp_shadow):
            try:
                os.unlink(tmp)
            except OSError:
                pass
def _set_passwd(username, admin_passwd, passwd_file, shadow_file):
    """Set the password for username to admin_passwd

    The passwd_file is not modified.  The shadow_file is updated.
    If the username is not found in both files, an exception is raised.

    :param username: the username
    :param admin_passwd: the plaintext password to hash and store
    :param passwd_file: path to the passwd file
    :param shadow_file: path to the shadow password file
    :returns: nothing
    :raises: exception.NovaException(), IOError()
    """
    salt_set = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789./')
    # encryption algo - id pairs for crypt()
    algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''}

    salt = ''.join([random.choice(salt_set) for _i in range(16)])

    # crypt() depends on the underlying libc, and may not support all
    # forms of hash. We try md5 first. If we get only 13 characters back,
    # then the underlying crypt() didn't understand the '$n$salt' magic,
    # so we fall back to DES.
    # md5 is the default because it's widely supported. Although the
    # local crypt() might support stronger SHA, the target instance
    # might not.
    encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt)
    if len(encrypted_passwd) == 13:
        encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt)

    # Initialize the handles so the finally block is safe even when an
    # open() fails (previously that raised NameError from the cleanup).
    p_file = None
    s_file = None
    try:
        p_file = open(passwd_file, 'r')
        s_file = open(shadow_file, 'r')

        # username MUST exist in passwd file or it's an error
        found = False
        for entry in p_file:
            split_entry = entry.split(':')
            if split_entry[0] == username:
                found = True
                break
        if not found:
            msg = _('User %(username)s not found in password file.')
            # The message uses a named placeholder, so it must be
            # formatted with a mapping (formatting with the bare string
            # raised TypeError before).
            raise exception.NovaException(msg % {'username': username})

        # update password in the shadow file.  It's an error if the
        # user doesn't exist.
        new_shadow = list()
        found = False
        for entry in s_file:
            split_entry = entry.split(':')
            if split_entry[0] == username:
                # Field 1 of a shadow entry is the password hash.
                split_entry[1] = encrypted_passwd
                found = True
            new_entry = ':'.join(split_entry)
            new_shadow.append(new_entry)
        s_file.close()
        s_file = None
        if not found:
            msg = _('User %(username)s not found in shadow file.')
            raise exception.NovaException(msg % {'username': username})

        s_file = open(shadow_file, 'w')
        for entry in new_shadow:
            s_file.write(entry)
    finally:
        if p_file is not None:
            p_file.close()
        if s_file is not None:
            s_file.close()
# ./CrossVul/dataset_final_sorted/CWE-22/py/good_3679_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Proxy AMI-related calls from cloud controller to objectstore service."""
import binascii
import os
import shutil
import tarfile
import tempfile
from xml.etree import ElementTree
import boto.s3.connection
import eventlet
from nova import crypto
import nova.db.api
from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova import utils
from nova.api.ec2 import ec2utils
LOG = logging.getLogger("nova.image.s3")

FLAGS = flags.FLAGS
# Parent directory under which per-image temp dirs are created while
# downloading and decrypting registered images.
flags.DEFINE_string('image_decryption_dir', '/tmp',
                    'parent dir for tempdir used for image decryption')
# NOTE: nova-objectstore does not validate these credentials, hence the
# 'notchecked' defaults.
flags.DEFINE_string('s3_access_key', 'notchecked',
                    'access key to use for s3 server for images')
flags.DEFINE_string('s3_secret_key', 'notchecked',
                    'secret key to use for s3 server for images')
class S3ImageService(object):
    """Wraps an existing image service to support s3 based register.

    EC2-style integer image ids are mapped to/from the wrapped service's
    uuids via the s3_images table in the nova database.
    """

    def __init__(self, service=None, *args, **kwargs):
        self.service = service or image.get_default_image_service()
        self.service.__init__(*args, **kwargs)

    def get_image_uuid(self, context, image_id):
        """Map an integer image id to the wrapped service's uuid."""
        return nova.db.api.s3_image_get(context, image_id)['uuid']

    def get_image_id(self, context, image_uuid):
        """Map a wrapped-service uuid back to its integer image id."""
        return nova.db.api.s3_image_get_by_uuid(context, image_uuid)['id']

    def _create_image_id(self, context, image_uuid):
        """Allocate a new integer id for *image_uuid*."""
        return nova.db.api.s3_image_create(context, image_uuid)['id']

    def _translate_uuids_to_ids(self, context, images):
        return [self._translate_uuid_to_id(context, img) for img in images]

    def _translate_uuid_to_id(self, context, image):
        """Return a copy of *image* with uuid references ('id' and the
        kernel/ramdisk properties) replaced by integer ids, creating
        mappings on demand.
        """

        def _find_or_create(image_uuid):
            if image_uuid is None:
                return
            try:
                return self.get_image_id(context, image_uuid)
            except exception.NotFound:
                return self._create_image_id(context, image_uuid)

        image_copy = image.copy()

        try:
            image_id = image_copy['id']
        except KeyError:
            pass
        else:
            image_copy['id'] = _find_or_create(image_id)

        for prop in ['kernel_id', 'ramdisk_id']:
            try:
                image_uuid = image_copy['properties'][prop]
            except (KeyError, ValueError):
                pass
            else:
                image_copy['properties'][prop] = _find_or_create(image_uuid)

        return image_copy

    def create(self, context, metadata, data=None):
        """Create an image.

        metadata['properties'] should contain image_location.
        """
        image = self._s3_create(context, metadata)
        return image

    def delete(self, context, image_id):
        image_uuid = self.get_image_uuid(context, image_id)
        self.service.delete(context, image_uuid)

    def update(self, context, image_id, metadata, data=None):
        image_uuid = self.get_image_uuid(context, image_id)
        image = self.service.update(context, image_uuid, metadata, data)
        return self._translate_uuid_to_id(context, image)

    def index(self, context):
        #NOTE(bcwaldon): sort asc to make sure we assign lower ids
        # to older images
        images = self.service.index(context, sort_dir='asc')
        return self._translate_uuids_to_ids(context, images)

    def detail(self, context):
        #NOTE(bcwaldon): sort asc to make sure we assign lower ids
        # to older images
        images = self.service.detail(context, sort_dir='asc')
        return self._translate_uuids_to_ids(context, images)

    def show(self, context, image_id):
        image_uuid = self.get_image_uuid(context, image_id)
        image = self.service.show(context, image_uuid)
        return self._translate_uuid_to_id(context, image)

    def show_by_name(self, context, name):
        image = self.service.show_by_name(context, name)
        return self._translate_uuid_to_id(context, image)

    def get(self, context, image_id):
        image_uuid = self.get_image_uuid(context, image_id)
        # Delegate to the wrapped service.  The previous code called
        # self.get(self, context, image_uuid), i.e. itself with 'self'
        # passed twice, which raised TypeError on every call.
        return self.service.get(context, image_uuid)

    @staticmethod
    def _conn(context):
        """Build a boto S3 connection to the configured image store."""
        # NOTE(vish): access and secret keys for s3 server are not
        # checked in nova-objectstore
        access = FLAGS.s3_access_key
        secret = FLAGS.s3_secret_key
        calling = boto.s3.connection.OrdinaryCallingFormat()
        return boto.s3.connection.S3Connection(aws_access_key_id=access,
                                               aws_secret_access_key=secret,
                                               is_secure=False,
                                               calling_format=calling,
                                               port=FLAGS.s3_port,
                                               host=FLAGS.s3_host)

    @staticmethod
    def _download_file(bucket, filename, local_dir):
        """Fetch *filename* from *bucket* into *local_dir*; return the
        local path.
        """
        key = bucket.get_key(filename)
        local_filename = os.path.join(local_dir, os.path.basename(filename))
        key.get_contents_to_filename(local_filename)
        return local_filename

    def _s3_parse_manifest(self, context, metadata, manifest):
        """Parse an EC2 bundle manifest and register a queued image.

        Returns (manifest_element, image_dict_with_int_id, image_uuid).
        """
        manifest = ElementTree.fromstring(manifest)
        image_format = 'ami'
        image_type = 'machine'

        try:
            kernel_id = manifest.find('machine_configuration/kernel_id').text
            # NOTE(review): a literal 'true' apparently marks this image
            # as itself being a kernel -- confirm against bundler docs.
            if kernel_id == 'true':
                image_format = 'aki'
                image_type = 'kernel'
                kernel_id = None
        except Exception:
            kernel_id = None

        try:
            ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
            if ramdisk_id == 'true':
                image_format = 'ari'
                image_type = 'ramdisk'
                ramdisk_id = None
        except Exception:
            ramdisk_id = None

        try:
            arch = manifest.find('machine_configuration/architecture').text
        except Exception:
            arch = 'x86_64'

        # NOTE(yamahata):
        # EC2 ec2-budlne-image --block-device-mapping accepts
        # <virtual name>=<device name> where
        # virtual name = {ami, root, swap, ephemeral<N>}
        #                where N is no negative integer
        # device name = the device name seen by guest kernel.
        # They are converted into
        # block_device_mapping/mapping/{virtual, device}
        #
        # Do NOT confuse this with ec2-register's block device mapping
        # argument.
        mappings = []
        try:
            block_device_mapping = manifest.findall('machine_configuration/'
                                                    'block_device_mapping/'
                                                    'mapping')
            for bdm in block_device_mapping:
                mappings.append({'virtual': bdm.find('virtual').text,
                                 'device': bdm.find('device').text})
        except Exception:
            mappings = []

        properties = metadata['properties']
        properties['project_id'] = context.project_id
        properties['architecture'] = arch

        def _translate_dependent_image_id(image_key, image_id):
            image_id = ec2utils.ec2_id_to_id(image_id)
            image_uuid = self.get_image_uuid(context, image_id)
            # Store under the caller-supplied key (kernel_id/ramdisk_id);
            # the previous code always wrote to 'image_id', so the
            # kernel/ramdisk references were never recorded.
            properties[image_key] = image_uuid

        if kernel_id:
            _translate_dependent_image_id('kernel_id', kernel_id)

        if ramdisk_id:
            _translate_dependent_image_id('ramdisk_id', ramdisk_id)

        if mappings:
            properties['mappings'] = mappings

        metadata.update({'disk_format': image_format,
                         'container_format': image_format,
                         'status': 'queued',
                         'is_public': False,
                         'properties': properties})
        metadata['properties']['image_state'] = 'pending'

        #TODO(bcwaldon): right now, this removes user-defined ids.
        # We need to re-enable this.
        image_id = metadata.pop('id', None)

        image = self.service.create(context, metadata)

        # extract the new uuid and generate an int id to present back to user
        image_uuid = image['id']
        image['id'] = self._create_image_id(context, image_uuid)

        # return image_uuid so the caller can still make use of image_service
        return manifest, image, image_uuid

    def _s3_create(self, context, metadata):
        """Gets a manifest from s3 and makes an image.

        The heavy download/decrypt/untar/upload pipeline runs
        asynchronously via eventlet; the queued image is returned
        immediately and its 'image_state' property tracks progress.
        """
        image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)

        image_location = metadata['properties']['image_location']
        bucket_name = image_location.split('/')[0]
        manifest_path = image_location[len(bucket_name) + 1:]
        bucket = self._conn(context).get_bucket(bucket_name)
        key = bucket.get_key(manifest_path)
        manifest = key.get_contents_as_string()

        manifest, image, image_uuid = self._s3_parse_manifest(context,
                                                              metadata,
                                                              manifest)

        def delayed_create():
            """This handles the fetching and decrypting of the part files."""
            log_vars = {'image_location': image_location,
                        'image_path': image_path}
            metadata['properties']['image_state'] = 'downloading'
            self.service.update(context, image_uuid, metadata)

            try:
                parts = []
                elements = manifest.find('image').getiterator('filename')
                for fn_element in elements:
                    part = self._download_file(bucket,
                                               fn_element.text,
                                               image_path)
                    parts.append(part)

                # NOTE(vish): this may be suboptimal, should we use cat?
                enc_filename = os.path.join(image_path, 'image.encrypted')
                with open(enc_filename, 'w') as combined:
                    for filename in parts:
                        with open(filename) as part:
                            shutil.copyfileobj(part, combined)
            except Exception:
                LOG.exception(_("Failed to download %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_download'
                self.service.update(context, image_uuid, metadata)
                return

            metadata['properties']['image_state'] = 'decrypting'
            self.service.update(context, image_uuid, metadata)

            try:
                hex_key = manifest.find('image/ec2_encrypted_key').text
                encrypted_key = binascii.a2b_hex(hex_key)
                hex_iv = manifest.find('image/ec2_encrypted_iv').text
                encrypted_iv = binascii.a2b_hex(hex_iv)

                # FIXME(vish): grab key from common service so this can run on
                #              any host.
                cloud_pk = crypto.key_path(context.project_id)

                dec_filename = os.path.join(image_path, 'image.tar.gz')
                self._decrypt_image(enc_filename, encrypted_key,
                                    encrypted_iv, cloud_pk,
                                    dec_filename)
            except Exception:
                LOG.exception(_("Failed to decrypt %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_decrypt'
                self.service.update(context, image_uuid, metadata)
                return

            metadata['properties']['image_state'] = 'untarring'
            self.service.update(context, image_uuid, metadata)

            try:
                unz_filename = self._untarzip_image(image_path, dec_filename)
            except Exception:
                LOG.exception(_("Failed to untar %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_untar'
                self.service.update(context, image_uuid, metadata)
                return

            metadata['properties']['image_state'] = 'uploading'
            self.service.update(context, image_uuid, metadata)
            try:
                with open(unz_filename) as image_file:
                    self.service.update(context, image_uuid,
                                        metadata, image_file)
            except Exception:
                LOG.exception(_("Failed to upload %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_upload'
                self.service.update(context, image_uuid, metadata)
                return

            metadata['properties']['image_state'] = 'available'
            metadata['status'] = 'active'
            self.service.update(context, image_uuid, metadata)

            shutil.rmtree(image_path)

        eventlet.spawn_n(delayed_create)

        return image

    @staticmethod
    def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
                       cloud_private_key, decrypted_filename):
        """RSA-decrypt the bundled AES key/iv, then AES-decrypt the image."""
        key, err = utils.execute('openssl',
                                 'rsautl',
                                 '-decrypt',
                                 '-inkey', '%s' % cloud_private_key,
                                 process_input=encrypted_key,
                                 check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt private key: %s')
                                  % err)
        iv, err = utils.execute('openssl',
                                'rsautl',
                                '-decrypt',
                                '-inkey', '%s' % cloud_private_key,
                                process_input=encrypted_iv,
                                check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt initialization '
                                    'vector: %s') % err)

        _out, err = utils.execute('openssl', 'enc',
                                  '-d', '-aes-128-cbc',
                                  '-in', '%s' % (encrypted_filename,),
                                  '-K', '%s' % (key,),
                                  '-iv', '%s' % (iv,),
                                  '-out', '%s' % (decrypted_filename,),
                                  check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt image file '
                                    '%(image_file)s: %(err)s') %
                                  {'image_file': encrypted_filename,
                                   'err': err})

    @staticmethod
    def _test_for_malicious_tarball(path, filename):
        """Raises exception if extracting tarball would escape extract path"""
        tar_file = tarfile.open(filename, 'r|gz')
        for n in tar_file.getnames():
            if not os.path.abspath(os.path.join(path, n)).startswith(path):
                tar_file.close()
                raise exception.Error(_('Unsafe filenames in image'))
        tar_file.close()

    @staticmethod
    def _untarzip_image(path, filename):
        """Safely extract the image tarball; return the extracted file."""
        S3ImageService._test_for_malicious_tarball(path, filename)
        tar_file = tarfile.open(filename, 'r|gz')
        tar_file.extractall(path)
        image_file = tar_file.getnames()[0]
        tar_file.close()
        return os.path.join(path, image_file)
# ./CrossVul/dataset_final_sorted/CWE-22/py/good_3539_1
#!/usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# copyright 2006 Duke University
# author seth vidal
# sync all or the newest packages from a repo to the local path
# TODO:
# have it print out list of changes
# make it work with mirrorlists (silly, really)
# man page/more useful docs
# deal nicely with a package changing but not changing names (ie: replacement)
# criteria
# if a package is not the same and smaller then reget it
# if a package is not the same and larger, delete it and get it again
# always replace metadata files if they're not the same.
import os
import sys
import shutil
import stat
from optparse import OptionParser
from urlparse import urljoin
from yumutils.i18n import _
import yum
import yum.Errors
from yum.packageSack import ListPackageSack
import rpmUtils.arch
import logging
from urlgrabber.progress import TextMeter, TextMultiFileMeter
import urlgrabber
class RepoSync(yum.YumBase):
    """Thin yum.YumBase subclass carrying the CLI options and a
    reposync-specific logger.
    """
    def __init__(self, opts):
        yum.YumBase.__init__(self)
        # Dedicated logger channel used for per-package messages.
        self.logger = logging.getLogger('yum.verbose.reposync')
        self.opts = opts
def localpkgs(directory):
    """Recursively collect regular *.rpm files below *directory*.

    Returns a dict keyed by package file name (basename only); each value
    maps 'path' to the full path, 'size' to the byte size and 'device' to
    the device number.  When the same file name occurs in more than one
    subdirectory, the entry encountered last wins.  Symlinks are not
    followed (lstat), and unreadable entries are skipped.
    """
    found = {}
    for name in os.listdir(directory):
        full = os.path.join(directory, name)
        try:
            st = os.lstat(full)
        except os.error:
            continue
        if stat.S_ISDIR(st.st_mode):
            # Merge the subdirectory's results into ours.
            found.update(localpkgs(full))
        elif stat.S_ISREG(st.st_mode) and name.endswith(".rpm"):
            found[name] = {'path': full,
                           'size': st.st_size,
                           'device': st.st_dev}
    return found
def parseArgs():
    """Build the reposync OptionParser and parse sys.argv.

    Returns the (options, leftover_args) pair from optparse.
    """
    usage = _("""
Reposync is used to synchronize a remote yum repository to a local
directory using yum to retrieve the packages.
%s [options]
""") % sys.argv[0]

    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--config", default='/etc/yum.conf',
        help=_('config file to use (defaults to /etc/yum.conf)'))
    parser.add_option("-a", "--arch", default=None,
        help=_('act as if running the specified arch (default: current arch, note: does not override $releasever. x86_64 is a superset for i*86.)'))
    parser.add_option("--source", default=False, dest="source", action="store_true",
        help=_('operate on source packages'))
    parser.add_option("-r", "--repoid", default=[], action='append',
        help=_("specify repo ids to query, can be specified multiple times (default is all enabled)"))
    parser.add_option("-e", "--cachedir",
        help=_("directory in which to store metadata"))
    parser.add_option("-t", "--tempcache", default=False, action="store_true",
        help=_("Use a temp dir for storing/accessing yum-cache"))
    parser.add_option("-d", "--delete", default=False, action="store_true",
        help=_("delete local packages no longer present in repository"))
    parser.add_option("-p", "--download_path", dest='destdir',
        default=os.getcwd(), help=_("Path to download packages to: defaults to current dir"))
    parser.add_option("--norepopath", dest='norepopath', default=False, action="store_true",
        help=_("Don't add the reponame to the download path. Can only be used when syncing a single repository (default is to add the reponame)"))
    parser.add_option("-g", "--gpgcheck", default=False, action="store_true",
        help=_("Remove packages that fail GPG signature checking after downloading"))
    parser.add_option("-u", "--urls", default=False, action="store_true",
        help=_("Just list urls of what would be downloaded, don't download"))
    parser.add_option("-n", "--newest-only", dest='newest', default=False, action="store_true",
        help=_("Download only newest packages per-repo"))
    parser.add_option("-q", "--quiet", default=False, action="store_true",
        help=_("Output as little as possible"))
    parser.add_option("-l", "--plugins", default=False, action="store_true",
        help=_("enable yum plugin support"))
    parser.add_option("-m", "--downloadcomps", default=False, action="store_true",
        help=_("also download comps.xml"))
    parser.add_option("", "--download-metadata", dest="downloadmd",
        default=False, action="store_true",
        help=_("download all the non-default metadata"))
    (opts, args) = parser.parse_args()
    return (opts, args)
def main():
    """Entry point: set up yum, then mirror each enabled repository.

    Exits non-zero on setup failures, download problems, or GPG
    verification failures.
    """
    (opts, dummy) = parseArgs()

    # Ensure the destination directory exists and is writable (skipped
    # in --urls mode, which only prints what would be fetched).
    if not os.path.exists(opts.destdir) and not opts.urls:
        try:
            os.makedirs(opts.destdir)
        except OSError, e:
            print >> sys.stderr, _("Error: Cannot create destination dir %s") % opts.destdir
            sys.exit(1)

    if not os.access(opts.destdir, os.W_OK) and not opts.urls:
        print >> sys.stderr, _("Error: Cannot write to destination dir %s") % opts.destdir
        sys.exit(1)

    my = RepoSync(opts=opts)
    my.doConfigSetup(fn=opts.config, init_plugins=opts.plugins)

    # Force unprivileged users to have a private temporary cachedir
    # if they've not given an explicit cachedir
    if os.getuid() != 0 and not opts.cachedir:
        opts.tempcache = True

    if opts.tempcache:
        if not my.setCacheDir(force=True, reuse=False):
            print >> sys.stderr, _("Error: Could not make cachedir, exiting")
            sys.exit(50)
        my.conf.uid = 1  # force locking of user cache
    elif opts.cachedir:
        my.repos.setCacheDir(opts.cachedir)

    # Lock if they've not given an explicit cachedir
    if not opts.cachedir:
        try:
            my.doLock()
        except yum.Errors.LockError, e:
            print >> sys.stderr, _("Error: %s") % e
            sys.exit(50)

    # Use progress bar display when downloading repo metadata
    # and package files ... needs to be setup before .repos (ie. RHN/etc.).
    if not opts.quiet:
        my.repos.setProgressBar(TextMeter(fo=sys.stdout), TextMultiFileMeter(fo=sys.stdout))

    my.doRepoSetup()

    # Restrict to the requested repo ids: disable everything, then
    # re-enable only the matches.
    if len(opts.repoid) > 0:
        myrepos = []

        # find the ones we want
        for glob in opts.repoid:
            add_repos = my.repos.findRepos(glob)
            if not add_repos:
                print >> sys.stderr, _("Warning: cannot find repository %s") % glob
                continue
            myrepos.extend(add_repos)

        if not myrepos:
            print >> sys.stderr, _("No repositories found")
            sys.exit(1)

        # disable them all
        for repo in my.repos.repos.values():
            repo.disable()

        # enable the ones we like
        for repo in myrepos:
            repo.enable()

    # --norepopath can only be sensibly used with a single repository:
    if len(my.repos.listEnabled()) > 1 and opts.norepopath:
        print >> sys.stderr, _("Error: Can't use --norepopath with multiple repositories")
        sys.exit(1)

    try:
        arches = rpmUtils.arch.getArchList(opts.arch)
        if opts.source:
            arches += ['src']
        my.doSackSetup(arches)
    except yum.Errors.RepoError, e:
        print >> sys.stderr, _("Error setting up repositories: %s") % e
        # maybe this shouldn't be entirely fatal
        sys.exit(1)

    exit_code = 0
    for repo in my.repos.listEnabled():
        reposack = ListPackageSack(my.pkgSack.returnPackages(repoid=repo.id))

        if opts.newest:
            download_list = reposack.returnNewestByNameArch()
        else:
            download_list = list(reposack)

        if opts.norepopath:
            local_repo_path = opts.destdir
        else:
            local_repo_path = opts.destdir + '/' + repo.id

        # --delete: remove local rpms that are no longer in the repo.
        if opts.delete and os.path.exists(local_repo_path):
            current_pkgs = localpkgs(local_repo_path)

            download_set = {}
            for pkg in download_list:
                rpmname = os.path.basename(pkg.remote_path)
                download_set[rpmname] = 1

            for pkg in current_pkgs:
                if pkg in download_set:
                    continue

                if not opts.quiet:
                    my.logger.info("Removing obsolete %s", pkg)
                os.unlink(current_pkgs[pkg]['path'])

        if opts.downloadcomps or opts.downloadmd:
            if not os.path.exists(local_repo_path):
                try:
                    os.makedirs(local_repo_path)
                except IOError, e:
                    my.logger.error("Could not make repo subdir: %s" % e)
                    my.closeRpmDB()
                    sys.exit(1)

            if opts.downloadcomps:
                wanted_types = ['group']

            if opts.downloadmd:
                wanted_types = repo.repoXML.fileTypes()

            for ftype in repo.repoXML.fileTypes():
                # The primary/filelists/other metadata is handled by yum
                # itself; only fetch the extra types requested.
                if ftype in ['primary', 'primary_db', 'filelists',
                             'filelists_db', 'other', 'other_db']:
                    continue
                if ftype not in wanted_types:
                    continue
                try:
                    resultfile = repo.retrieveMD(ftype)
                    basename = os.path.basename(resultfile)
                    if ftype == 'group' and opts.downloadcomps:  # for compat with how --downloadcomps saved the comps file always as comps.xml
                        basename = 'comps.xml'
                    shutil.copyfile(resultfile, "%s/%s" % (local_repo_path, basename))
                except yum.Errors.RepoMDError, e:
                    if not opts.quiet:
                        my.logger.error("Unable to fetch metadata: %s" % e)

        # Sum the size of everything not already present locally so the
        # progress meter can show a meaningful total.
        remote_size = 0
        if not opts.urls:
            for pkg in download_list:
                local = os.path.join(local_repo_path, pkg.remote_path)
                sz = int(pkg.returnSimple('packagesize'))
                if os.path.exists(local) and os.path.getsize(local) == sz:
                    continue
                remote_size += sz
            if hasattr(urlgrabber.progress, 'text_meter_total_size'):
                urlgrabber.progress.text_meter_total_size(remote_size)

        download_list.sort(key=lambda pkg: pkg.name)

        # --urls: print what would be downloaded and skip to next repo.
        if opts.urls:
            for pkg in download_list:
                local = os.path.join(local_repo_path, pkg.remote_path)
                if not (os.path.exists(local) and my.verifyPkg(local, pkg, False)):
                    print urljoin(pkg.repo.urls[0], pkg.remote_path)
            continue

        # create dest dir
        if not os.path.exists(local_repo_path):
            os.makedirs(local_repo_path)

        # set localpaths
        for pkg in download_list:
            pkg.localpath = os.path.join(local_repo_path, pkg.remote_path)
            pkg.repo.copy_local = True
            pkg.repo.cache = 0
            localdir = os.path.dirname(pkg.localpath)
            if not os.path.exists(localdir):
                os.makedirs(localdir)

        # use downloader from YumBase
        probs = my.downloadPkgs(download_list)
        if probs:
            exit_code = 1
            for key in probs:
                for error in probs[key]:
                    my.logger.error('%s: %s', key, error)

        if opts.gpgcheck:
            for pkg in download_list:
                result, error = my.sigCheckPkg(pkg)
                if result != 0:
                    rpmfn = os.path.basename(pkg.remote_path)
                    if result == 1:
                        my.logger.warning('Removing %s, due to missing GPG key.' % rpmfn)
                    elif result == 2:
                        my.logger.warning('Removing %s due to failed signature check.' % rpmfn)
                    else:
                        # NOTE(review): this format string has two %s
                        # placeholders but a single value; if this branch
                        # is ever reached it raises TypeError -- confirm
                        # against upstream reposync for the intended fix.
                        my.logger.warning('Removing %s due to failed signature check: %s' % rpmfn)
                    os.unlink(pkg.localpath)
                    exit_code = 1
                    continue

    my.closeRpmDB()
    sys.exit(exit_code)
# Script entry point: run the sync when executed directly.
if __name__ == "__main__":
    main()
# ./CrossVul/dataset_final_sorted/CWE-22/py/good_116_0
# -*- coding: utf-8 -*-
'''
A few checks to make sure the environment is sane
'''
from __future__ import absolute_import
# Original Author: Jeff Schroeder <jeffschroeder@computer.org>
# Import python libs
import os
import re
import sys
import stat
import errno
import socket
import logging
# Import third party libs
try:
import win32file
except ImportError:
import resource
# Import salt libs
from salt.log import is_console_configured
from salt.log.setup import LOG_LEVELS
from salt.exceptions import SaltClientError, SaltSystemExit, \
CommandExecutionError
import salt.defaults.exitcodes
import salt.utils
log = logging.getLogger(__name__)
def zmq_version():
    '''
    ZeroMQ python bindings >= 2.1.9 are required
    '''
    try:
        import zmq
    except Exception:
        # Return True for local mode
        return True
    ver = zmq.__version__
    # The last matched group can be None if the version
    # is something like 3.1 and that will work properly
    version_match = re.match(r'^(\d+)\.(\d+)(?:\.(\d+))?', ver)
    if not version_match:
        # Fallthrough and hope for the best
        msg = "Using untested zmq python bindings version: '{0}'".format(ver)
        if is_console_configured():
            log.warning(msg)
        else:
            sys.stderr.write("WARNING {0}\n".format(msg))
        return True
    # Convert each numeric component to int; leave None (missing point
    # release) untouched
    components = []
    for component in version_match.groups():
        if component and component.isdigit():
            components.append(int(component))
        else:
            components.append(component)
    major, minor, point = components
    # Anything newer than the 2.1 series is fine
    if major > 2 or (major == 2 and minor > 1):
        return True
    if major == 2 and minor == 1:
        # zmq 2.1dev could be built against a newer libzmq
        if "dev" in ver and not point:
            msg = 'Using dev zmq module, please report unexpected results'
            if is_console_configured():
                log.warning(msg)
            else:
                sys.stderr.write("WARNING: {0}\n".format(msg))
            return True
        if point and point >= 9:
            return True
    # If all else fails, gracefully croak and warn the user
    log.critical('ZeroMQ python bindings >= 2.1.9 are required')
    if 'salt-master' in sys.argv[0]:
        msg = ('The Salt Master is unstable using a ZeroMQ version '
               'lower than 2.1.11 and requires this fix: http://lists.zeromq.'
               'org/pipermail/zeromq-dev/2011-June/012094.html')
        if is_console_configured():
            log.critical(msg)
        else:
            sys.stderr.write('CRITICAL {0}\n'.format(msg))
    return False
def lookup_family(hostname):
    '''
    Lookup a hostname and determine its address family. The first address returned
    will be AF_INET6 if the system is IPv6-enabled, and AF_INET otherwise.
    '''
    # If lookups fail, fall back to AF_INET sockets (and v4 addresses).
    default_family = socket.AF_INET
    try:
        # An empty hostname is passed through as None (wildcard lookup)
        results = socket.getaddrinfo(
            hostname or None, None, socket.AF_UNSPEC, socket.SOCK_STREAM
        )
    except socket.gaierror:
        return default_family
    if not results:
        return default_family
    # family is the first element of each getaddrinfo() tuple
    return results[0][0]
def verify_socket(interface, pub_port, ret_port):
    '''
    Attempt to bind to the sockets to verify that they are available
    '''
    family = lookup_family(interface)
    for port in (pub_port, ret_port):
        probe = socket.socket(family, socket.SOCK_STREAM)
        try:
            probe.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            probe.bind((interface, int(port)))
        except Exception as exc:
            # Any bind failure (in use, bad interface, permission) means the
            # socket is not available to us
            msg = 'Unable to bind socket {0}:{1}'.format(interface, port)
            if exc.args:
                msg = '{0}, error: {1}'.format(msg, str(exc))
            else:
                msg = '{0}, this might not be a problem.'.format(msg)
            msg += '; Is there another salt-master running?'
            if is_console_configured():
                log.warning(msg)
            else:
                sys.stderr.write('WARNING: {0}\n'.format(msg))
            return False
        finally:
            # Always release the probe socket, success or failure
            probe.close()
    return True
def verify_files(files, user):
    '''
    Verify that the named files exist and are owned by the named user

    Creates missing parent directories and touches missing files, then
    chowns each file to ``user`` when owned by someone else. Exits the
    process when the user does not exist; raises SaltSystemExit on
    unrecoverable filesystem errors. Returns True otherwise.
    '''
    if salt.utils.is_windows():
        # Ownership semantics below are POSIX-only
        return True
    import pwd  # after confirming not running Windows
    try:
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
    except KeyError:
        err = ('Failed to prepare the Salt environment for user '
               '{0}. The user is not available.\n').format(user)
        sys.stderr.write(err)
        sys.exit(salt.defaults.exitcodes.EX_NOUSER)
    for fn_ in files:
        dirname = os.path.dirname(fn_)
        try:
            if dirname:
                try:
                    os.makedirs(dirname)
                except OSError as err:
                    # an already-existing directory is fine
                    if err.errno != errno.EEXIST:
                        raise
            if not os.path.isfile(fn_):
                # touch the file so it exists for the ownership check below
                with salt.utils.fopen(fn_, 'w+') as fp_:
                    fp_.write('')
        except IOError as err:
            if os.path.isfile(dirname):
                # a regular file is squatting where a directory is needed
                msg = 'Failed to create path {0}, is {1} a file?'.format(fn_, dirname)
                raise SaltSystemExit(msg=msg)
            if err.errno != errno.EACCES:
                raise
            msg = 'No permissions to access "{0}", are you running as the correct user?'.format(fn_)
            raise SaltSystemExit(msg=msg)
        except OSError as err:
            msg = 'Failed to create path "{0}" - {1}'.format(fn_, err)
            raise SaltSystemExit(msg=msg)
        stats = os.stat(fn_)
        if uid != stats.st_uid:
            try:
                # -1 keeps the current group; only the owner is changed
                os.chown(fn_, uid, -1)
            except OSError:
                # best-effort: lacking permission to chown is non-fatal here
                pass
    return True
def verify_env(dirs, user, permissive=False, pki_dir='', skip_extra=False):
    '''
    Verify that the named directories are in place and that the environment
    can shake the salt

    dirs       -- iterable of directory paths to create and verify
    user       -- name of the account that must own the directories
    permissive -- accept group ownership by any group ``user`` belongs to
    pki_dir    -- path of the PKI directory, forced to mode 700/750
    skip_extra -- when True, skip the extra ZeroMQ version check
    '''
    if salt.utils.is_windows():
        return win_verify_env(dirs, permissive, pki_dir, skip_extra)
    import pwd  # after confirming not running Windows
    try:
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
        gid = pwnam[3]
        groups = salt.utils.get_gid_list(user, include_default=False)
    except KeyError:
        err = ('Failed to prepare the Salt environment for user '
               '{0}. The user is not available.\n').format(user)
        sys.stderr.write(err)
        sys.exit(salt.defaults.exitcodes.EX_NOUSER)
    for dir_ in dirs:
        if not dir_:
            continue
        if not os.path.isdir(dir_):
            try:
                # NOTE: 18 == 0o022 (the previous comment claimed 077,
                # which is wrong); a 022 umask yields 755 directories.
                cumask = os.umask(18)  # 0o022
                os.makedirs(dir_)
                # If starting the process as root, chown the new dirs
                if os.getuid() == 0:
                    os.chown(dir_, uid, gid)
                os.umask(cumask)
            except OSError as err:
                msg = 'Failed to create directory path "{0}" - {1}\n'
                sys.stderr.write(msg.format(dir_, err))
                sys.exit(err.errno)

        mode = os.stat(dir_)
        # If starting the process as root, chown the new dirs
        if os.getuid() == 0:
            fmode = os.stat(dir_)
            if fmode.st_uid != uid or fmode.st_gid != gid:
                if permissive and fmode.st_gid in groups:
                    # Allow the directory to be owned by any group root
                    # belongs to if we say it's ok to be permissive
                    pass
                else:
                    # chown the file for the new user
                    os.chown(dir_, uid, gid)
            # Skip job-cache subtrees; they can be huge and are excluded
            for subdir in [a for a in os.listdir(dir_) if 'jobs' not in a]:
                fsubdir = os.path.join(dir_, subdir)
                if '{0}jobs'.format(os.path.sep) in fsubdir:
                    continue
                # 'walk_dirs' so the outer 'dirs' parameter is not shadowed
                for root, walk_dirs, files in os.walk(fsubdir):
                    for name in files:
                        if name.startswith('.'):
                            continue
                        path = os.path.join(root, name)
                        try:
                            fmode = os.stat(path)
                        except (IOError, OSError):
                            # BUGFIX: the file vanished or is not statable;
                            # skip it instead of falling through and using a
                            # stale/undefined 'fmode' as the original did.
                            continue
                        if fmode.st_uid != uid or fmode.st_gid != gid:
                            if permissive and fmode.st_gid in groups:
                                pass
                            else:
                                # chown the file for the new user
                                os.chown(path, uid, gid)
                    for name in walk_dirs:
                        path = os.path.join(root, name)
                        fmode = os.stat(path)
                        if fmode.st_uid != uid or fmode.st_gid != gid:
                            if permissive and fmode.st_gid in groups:
                                pass
                            else:
                                # chown the file for the new user
                                os.chown(path, uid, gid)
        # Allow the pki dir to be 700 or 750, but nothing else.
        # This prevents other users from writing out keys, while
        # allowing the use-case of 3rd-party software (like django)
        # to read in what it needs to integrate.
        #
        # If the permissions aren't correct, default to the more secure 700.
        # If acls are enabled, the pki_dir needs to remain readable, this
        # is still secure because the private keys are still only readable
        # by the user running the master
        if dir_ == pki_dir:
            smode = stat.S_IMODE(mode.st_mode)
            if smode != 448 and smode != 488:  # 448 == 0o700, 488 == 0o750
                if os.access(dir_, os.W_OK):
                    os.chmod(dir_, 448)
                else:
                    msg = 'Unable to securely set the permissions of "{0}".'
                    msg = msg.format(dir_)
                    if is_console_configured():
                        log.critical(msg)
                    else:
                        sys.stderr.write("CRITICAL: {0}\n".format(msg))
    if skip_extra is False:
        # Run the extra verification checks
        zmq_version()
def check_user(user):
    '''
    Check user and assign process uid/gid.

    Returns True when already running as ``user`` (or on Windows), True
    after successfully switching to ``user``, and False when the switch
    fails or the user does not exist.
    '''
    if salt.utils.is_windows():
        return True
    if user == salt.utils.get_user():
        return True
    import pwd  # after confirming not running Windows
    try:
        pwuser = pwd.getpwnam(user)
        try:
            # NOTE: supplementary groups and gid must be set BEFORE
            # setuid() drops root; after the uid change these calls
            # would no longer be permitted.
            if hasattr(os, 'initgroups'):
                os.initgroups(user, pwuser.pw_gid)  # pylint: disable=minimum-python-version
            else:
                os.setgroups(salt.utils.get_gid_list(user, include_default=False))
            os.setgid(pwuser.pw_gid)
            os.setuid(pwuser.pw_uid)

            # We could just reset the whole environment but let's just override
            # the variables we can get from pwuser
            if 'HOME' in os.environ:
                os.environ['HOME'] = pwuser.pw_dir

            if 'SHELL' in os.environ:
                os.environ['SHELL'] = pwuser.pw_shell

            for envvar in ('USER', 'LOGNAME'):
                if envvar in os.environ:
                    os.environ[envvar] = pwuser.pw_name

        except OSError:
            msg = 'Salt configured to run as user "{0}" but unable to switch.'
            msg = msg.format(user)
            if is_console_configured():
                log.critical(msg)
            else:
                sys.stderr.write("CRITICAL: {0}\n".format(msg))
            return False
    except KeyError:
        msg = 'User not found: "{0}"'.format(user)
        if is_console_configured():
            log.critical(msg)
        else:
            sys.stderr.write("CRITICAL: {0}\n".format(msg))
        return False
    return True
def list_path_traversal(path):
    '''
    Returns a full list of directories leading up to, and including, a path.

    So list_path_traversal('/path/to/salt') would return:
        ['/', '/path', '/path/to', '/path/to/salt']
    in that order.

    This routine has been tested on Windows systems as well.
    list_path_traversal('c:\\path\\to\\salt') on Windows would return:
        ['c:\\', 'c:\\path', 'c:\\path\\to', 'c:\\path\\to\\salt']
    '''
    traversal = [path]
    parent, leaf = os.path.split(path)
    if leaf == '':
        # a trailing separator yields an empty leaf; start from the parent
        traversal = [parent]
        parent, leaf = os.path.split(parent)
    # os.path.split() is a fixed point at the root ('/' -> ('/', '')),
    # so stop once the parent repeats
    while parent != traversal[0]:
        traversal.insert(0, parent)
        parent, leaf = os.path.split(parent)
    return traversal
def check_path_traversal(path, user='root', skip_perm_errors=False):
    '''
    Walk from the root down to ``path`` and verify the current user can
    read every directory on the way. Used to make sure a user can read all
    parent directories of the minion's key before trying to generate a new
    key and raising an IOError. Raises SaltClientError on the first
    unreadable ancestor unless ``skip_perm_errors`` is set.
    '''
    for ancestor in list_path_traversal(path):
        if os.access(ancestor, os.R_OK):
            continue
        msg = 'Could not access {0}.'.format(ancestor)
        if not os.path.exists(ancestor):
            msg += ' Path does not exist.'
        else:
            # Make the error message more intelligent based on how
            # the user invokes salt-call or whatever other script.
            current_user = salt.utils.get_user()
            if user != current_user:
                msg += ' Try running as user {0}.'.format(user)
            else:
                msg += ' Please give {0} read permissions.'.format(user)
        # We don't need to bail on config file permission errors
        # if the CLI process is run with the -a flag
        if skip_perm_errors:
            return
        # Propagate this exception up so there isn't a sys.exit()
        # in the middle of code that could be imported elsewhere.
        raise SaltClientError(msg)
def check_max_open_files(opts):
    '''
    Check the number of max allowed open files and adjust if needed

    Compares the number of accepted minion keys against the process' soft
    open-file limit and logs at increasing severity as the key count
    approaches (or passes) that limit.
    '''
    # configured target value for max_open_files
    mof_c = opts.get('max_open_files', 100000)
    if sys.platform.startswith('win'):
        # Check the Windows API for more detail on this
        # http://msdn.microsoft.com/en-us/library/xt874334(v=vs.71).aspx
        # and the python binding http://timgolden.me.uk/pywin32-docs/win32file.html
        mof_s = mof_h = win32file._getmaxstdio()
    else:
        # soft and hard RLIMIT_NOFILE for this process
        mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)

    accepted_keys_dir = os.path.join(opts.get('pki_dir'), 'minions')
    accepted_count = len(os.listdir(accepted_keys_dir))

    log.debug(
        'This salt-master instance has accepted {0} minion keys.'.format(
            accepted_count
        )
    )

    level = logging.INFO

    if (accepted_count * 4) <= mof_s:
        # We check for the soft value of max open files here because that's the
        # value the user chose to raise to.
        #
        # The number of accepted keys multiplied by four(4) is lower than the
        # soft value, everything should be OK
        return

    msg = (
        'The number of accepted minion keys({0}) should be lower than 1/4 '
        'of the max open files soft setting({1}). '.format(
            accepted_count, mof_s
        )
    )

    # Escalate the log level with the severity of the ratio; the elif
    # cascade runs from worst case to mildest.
    if accepted_count >= mof_s:
        # This should never occur, it might have already crashed
        msg += 'salt-master will crash pretty soon! '
        level = logging.CRITICAL
    elif (accepted_count * 2) >= mof_s:
        # This is way too low, CRITICAL
        level = logging.CRITICAL
    elif (accepted_count * 3) >= mof_s:
        level = logging.WARNING
        # The accepted count is more than 3 time, WARN
    elif (accepted_count * 4) >= mof_s:
        level = logging.INFO

    if mof_c < mof_h:
        msg += ('According to the system\'s hard limit, there\'s still a '
                'margin of {0} to raise the salt\'s max_open_files '
                'setting. ').format(mof_h - mof_c)

    msg += 'Please consider raising this value.'
    log.log(level=level, msg=msg)
def clean_path(root, path, subdir=False):
    '''
    Accepts the root the path needs to be under and verifies that the path is
    under said root. Pass in subdir=True if the path can result in a
    subdirectory of the root instead of having to reside directly in the root

    Returns the normalized path when it is contained in ``root``, or an
    empty string when it is not (or when ``root`` is not absolute).
    '''
    if not os.path.isabs(root):
        return ''
    # Normalize the root up front so both branches compare against the
    # same canonical form.
    root = os.path.normpath(root)
    if not os.path.isabs(path):
        path = os.path.join(root, path)
    path = os.path.normpath(path)
    if subdir:
        # SECURITY FIX: compare against ``root + os.sep`` (or exact
        # equality) so a sibling directory that merely shares the prefix
        # (e.g. '/rootevil' for root '/root') is rejected; the previous
        # bare startswith(root) check accepted it.
        if path == root or path.startswith(root + os.sep):
            return path
    else:
        if os.path.dirname(path) == root:
            return path
    return ''
def valid_id(opts, id_):
    '''
    Returns if the passed id is valid
    '''
    try:
        # Reject separators and NUL bytes outright, then make sure the id
        # resolves to a path directly inside the pki directory.
        for forbidden in ('/', '\\', '\0'):
            if forbidden in id_:
                return False
        return bool(clean_path(opts['pki_dir'], id_))
    except (AttributeError, KeyError, TypeError):
        # non-string id_, missing pki_dir, or non-mapping opts
        return False
def safe_py_code(code):
    '''
    Check a string to see if it has any potentially unsafe routines which
    could be executed via python, this routine is used to improve the
    safety of modules such as virtualenv
    '''
    # Substring blacklist; matches anywhere in the string, so e.g.
    # 'profile' is rejected because it contains 'file'.
    unsafe_tokens = (
        'import',
        ';',
        'subprocess',
        'eval',
        'open',
        'file',
        'exec',
        'input')
    return not any(token in code for token in unsafe_tokens)
def verify_log(opts):
    '''
    If an insecure logging configuration is found, show a warning
    '''
    configured_level = LOG_LEVELS.get(str(opts.get('log_level')).lower(), logging.NOTSET)
    if configured_level < logging.INFO:
        # debug/trace/garbage levels may leak credentials into the logs
        log.warning('Insecure logging configuration detected! Sensitive data may be logged.')
def win_verify_env(dirs, permissive=False, pki_dir='', skip_extra=False):
    '''
    Verify that the named directories are in place and that the environment
    can shake the salt

    Windows counterpart of verify_env(): creates the directories and,
    unless ``permissive``, restricts their DACLs to Administrators,
    System and Owner.
    '''
    import salt.utils.win_functions
    import salt.utils.win_dacl

    # Get the root path directory where salt is installed
    path = dirs[0]
    while os.path.basename(path) not in ['salt', 'salt-tests-tmpdir']:
        path, base = os.path.split(path)

    # Create the root path directory if missing
    if not os.path.isdir(path):
        os.makedirs(path)

    # Set permissions to the root path directory
    current_user = salt.utils.win_functions.get_current_user()
    if salt.utils.win_functions.is_admin(current_user):
        try:
            # Make the Administrators group owner
            # Use the SID to be locale agnostic
            salt.utils.win_dacl.set_owner(path, 'S-1-5-32-544')

        except CommandExecutionError:
            msg = 'Unable to securely set the owner of "{0}".'.format(path)
            if is_console_configured():
                log.critical(msg)
            else:
                sys.stderr.write("CRITICAL: {0}\n".format(msg))

        if not permissive:
            try:
                # Get a clean dacl by not passing an obj_name
                dacl = salt.utils.win_dacl.dacl()

                # Add aces to the dacl, use the GUID (locale non-specific)
                # Administrators Group
                dacl.add_ace('S-1-5-32-544', 'grant', 'full_control',
                             'this_folder_subfolders_files')
                # System
                dacl.add_ace('S-1-5-18', 'grant', 'full_control',
                             'this_folder_subfolders_files')
                # Owner
                dacl.add_ace('S-1-3-4', 'grant', 'full_control',
                             'this_folder_subfolders_files')

                # Save the dacl to the object
                dacl.save(path, True)

            except CommandExecutionError:
                msg = 'Unable to securely set the permissions of ' \
                      '"{0}".'.format(path)
                if is_console_configured():
                    log.critical(msg)
                else:
                    sys.stderr.write("CRITICAL: {0}\n".format(msg))

    # Create the directories
    for dir_ in dirs:
        if not dir_:
            continue
        if not os.path.isdir(dir_):
            try:
                os.makedirs(dir_)
            except OSError as err:
                msg = 'Failed to create directory path "{0}" - {1}\n'
                sys.stderr.write(msg.format(dir_, err))
                sys.exit(err.errno)

        # The PKI dir gets its own permissions
        if dir_ == pki_dir:
            try:
                # Make Administrators group the owner
                # NOTE(review): this targets `path` (the salt root) rather
                # than `dir_` (the pki dir); the dacl.save() below does
                # target dir_ -- verify whether using `path` here is
                # intentional or a copy/paste slip.
                salt.utils.win_dacl.set_owner(path, 'S-1-5-32-544')

                # Give Admins, System and Owner permissions
                # Get a clean dacl by not passing an obj_name
                dacl = salt.utils.win_dacl.dacl()

                # Add aces to the dacl, use the GUID (locale non-specific)
                # Administrators Group
                dacl.add_ace('S-1-5-32-544', 'grant', 'full_control',
                             'this_folder_subfolders_files')
                # System
                dacl.add_ace('S-1-5-18', 'grant', 'full_control',
                             'this_folder_subfolders_files')
                # Owner
                dacl.add_ace('S-1-3-4', 'grant', 'full_control',
                             'this_folder_subfolders_files')

                # Save the dacl to the object
                dacl.save(dir_, True)

            except CommandExecutionError:
                msg = 'Unable to securely set the permissions of "{0}".'
                msg = msg.format(dir_)
                if is_console_configured():
                    log.critical(msg)
                else:
                    sys.stderr.write("CRITICAL: {0}\n".format(msg))

    if skip_extra is False:
        # Run the extra verification checks
        zmq_version()
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_2815_0 |
crossvul-python_data_bad_115_1 | #!/usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# copyright 2006 Duke University
# author seth vidal
# sync all or the newest packages from a repo to the local path
# TODO:
# have it print out list of changes
# make it work with mirrorlists (silly, really)
# man page/more useful docs
# deal nicely with a package changing but not changing names (ie: replacement)
# criteria
# if a package is not the same and smaller then reget it
# if a package is not the same and larger, delete it and get it again
# always replace metadata files if they're not the same.
import os
import sys
import shutil
import stat
from optparse import OptionParser
from urlparse import urljoin
from yumutils.i18n import _
import yum
import yum.Errors
from yum.packageSack import ListPackageSack
import rpmUtils.arch
import logging
from urlgrabber.progress import TextMeter, TextMultiFileMeter
import urlgrabber
class RepoSync(yum.YumBase):
    """YumBase subclass carrying the CLI options and a dedicated logger."""
    def __init__(self, opts):
        yum.YumBase.__init__(self)
        # verbose channel used for per-package progress/error reporting
        self.logger = logging.getLogger('yum.verbose.reposync')
        self.opts = opts
def localpkgs(directory):
    """Recursively collect the .rpm files under ``directory``.

    Returns a dict keyed by file name; each value holds the file's full
    path, size in bytes and device number. Entries that disappear between
    listing and stat are silently skipped.
    """
    found = {}
    for entry in os.listdir(directory):
        full = os.path.join(directory, entry)
        try:
            st = os.lstat(full)
        except os.error:
            # vanished or unreadable entry; ignore it
            continue
        if stat.S_ISDIR(st.st_mode):
            found.update(localpkgs(full))
        elif stat.S_ISREG(st.st_mode) and entry.endswith(".rpm"):
            found[entry] = {'path': full, 'size': st.st_size, 'device': st.st_dev}
    return found
def parseArgs():
    """Build the reposync OptionParser and return (opts, args)."""
    usage = _("""
    Reposync is used to synchronize a remote yum repository to a local
    directory using yum to retrieve the packages.

    %s [options]
    """) % sys.argv[0]

    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--config", default='/etc/yum.conf',
        help=_('config file to use (defaults to /etc/yum.conf)'))
    parser.add_option("-a", "--arch", default=None,
        help=_('act as if running the specified arch (default: current arch, note: does not override $releasever. x86_64 is a superset for i*86.)'))
    parser.add_option("--source", default=False, dest="source", action="store_true",
        help=_('operate on source packages'))
    parser.add_option("-r", "--repoid", default=[], action='append',
        help=_("specify repo ids to query, can be specified multiple times (default is all enabled)"))
    parser.add_option("-e", "--cachedir",
        help=_("directory in which to store metadata"))
    parser.add_option("-t", "--tempcache", default=False, action="store_true",
        help=_("Use a temp dir for storing/accessing yum-cache"))
    parser.add_option("-d", "--delete", default=False, action="store_true",
        help=_("delete local packages no longer present in repository"))
    parser.add_option("-p", "--download_path", dest='destdir',
        default=os.getcwd(), help=_("Path to download packages to: defaults to current dir"))
    parser.add_option("--norepopath", dest='norepopath', default=False, action="store_true",
        help=_("Don't add the reponame to the download path. Can only be used when syncing a single repository (default is to add the reponame)"))
    parser.add_option("-g", "--gpgcheck", default=False, action="store_true",
        help=_("Remove packages that fail GPG signature checking after downloading"))
    parser.add_option("-u", "--urls", default=False, action="store_true",
        help=_("Just list urls of what would be downloaded, don't download"))
    parser.add_option("-n", "--newest-only", dest='newest', default=False, action="store_true",
        help=_("Download only newest packages per-repo"))
    parser.add_option("-q", "--quiet", default=False, action="store_true",
        help=_("Output as little as possible"))
    parser.add_option("-l", "--plugins", default=False, action="store_true",
        help=_("enable yum plugin support"))
    parser.add_option("-m", "--downloadcomps", default=False, action="store_true",
        help=_("also download comps.xml"))
    parser.add_option("", "--download-metadata", dest="downloadmd",
        default=False, action="store_true",
        help=_("download all the non-default metadata"))

    (opts, args) = parser.parse_args()
    return (opts, args)
def main():
(opts, dummy) = parseArgs()
if not os.path.exists(opts.destdir) and not opts.urls:
try:
os.makedirs(opts.destdir)
except OSError, e:
print >> sys.stderr, _("Error: Cannot create destination dir %s") % opts.destdir
sys.exit(1)
if not os.access(opts.destdir, os.W_OK) and not opts.urls:
print >> sys.stderr, _("Error: Cannot write to destination dir %s") % opts.destdir
sys.exit(1)
my = RepoSync(opts=opts)
my.doConfigSetup(fn=opts.config, init_plugins=opts.plugins)
# Force unprivileged users to have a private temporary cachedir
# if they've not given an explicit cachedir
if os.getuid() != 0 and not opts.cachedir:
opts.tempcache = True
if opts.tempcache:
if not my.setCacheDir(force=True, reuse=False):
print >> sys.stderr, _("Error: Could not make cachedir, exiting")
sys.exit(50)
my.conf.uid = 1 # force locking of user cache
elif opts.cachedir:
my.repos.setCacheDir(opts.cachedir)
# Lock if they've not given an explicit cachedir
if not opts.cachedir:
try:
my.doLock()
except yum.Errors.LockError, e:
print >> sys.stderr, _("Error: %s") % e
sys.exit(50)
# Use progress bar display when downloading repo metadata
# and package files ... needs to be setup before .repos (ie. RHN/etc.).
if not opts.quiet:
my.repos.setProgressBar(TextMeter(fo=sys.stdout), TextMultiFileMeter(fo=sys.stdout))
my.doRepoSetup()
if len(opts.repoid) > 0:
myrepos = []
# find the ones we want
for glob in opts.repoid:
add_repos = my.repos.findRepos(glob)
if not add_repos:
print >> sys.stderr, _("Warning: cannot find repository %s") % glob
continue
myrepos.extend(add_repos)
if not myrepos:
print >> sys.stderr, _("No repositories found")
sys.exit(1)
# disable them all
for repo in my.repos.repos.values():
repo.disable()
# enable the ones we like
for repo in myrepos:
repo.enable()
# --norepopath can only be sensibly used with a single repository:
if len(my.repos.listEnabled()) > 1 and opts.norepopath:
print >> sys.stderr, _("Error: Can't use --norepopath with multiple repositories")
sys.exit(1)
try:
arches = rpmUtils.arch.getArchList(opts.arch)
if opts.source:
arches += ['src']
my.doSackSetup(arches)
except yum.Errors.RepoError, e:
print >> sys.stderr, _("Error setting up repositories: %s") % e
# maybe this shouldn't be entirely fatal
sys.exit(1)
exit_code = 0
for repo in my.repos.listEnabled():
reposack = ListPackageSack(my.pkgSack.returnPackages(repoid=repo.id))
if opts.newest:
download_list = reposack.returnNewestByNameArch()
else:
download_list = list(reposack)
if opts.norepopath:
local_repo_path = opts.destdir
else:
local_repo_path = opts.destdir + '/' + repo.id
if opts.delete and os.path.exists(local_repo_path):
current_pkgs = localpkgs(local_repo_path)
download_set = {}
for pkg in download_list:
rpmname = os.path.basename(pkg.remote_path)
download_set[rpmname] = 1
for pkg in current_pkgs:
if pkg in download_set:
continue
if not opts.quiet:
my.logger.info("Removing obsolete %s", pkg)
os.unlink(current_pkgs[pkg]['path'])
if opts.downloadcomps or opts.downloadmd:
if not os.path.exists(local_repo_path):
try:
os.makedirs(local_repo_path)
except IOError, e:
my.logger.error("Could not make repo subdir: %s" % e)
my.closeRpmDB()
sys.exit(1)
if opts.downloadcomps:
wanted_types = ['group']
if opts.downloadmd:
wanted_types = repo.repoXML.fileTypes()
for ftype in repo.repoXML.fileTypes():
if ftype in ['primary', 'primary_db', 'filelists',
'filelists_db', 'other', 'other_db']:
continue
if ftype not in wanted_types:
continue
try:
resultfile = repo.retrieveMD(ftype)
basename = os.path.basename(resultfile)
if ftype == 'group' and opts.downloadcomps: # for compat with how --downloadcomps saved the comps file always as comps.xml
basename = 'comps.xml'
shutil.copyfile(resultfile, "%s/%s" % (local_repo_path, basename))
except yum.Errors.RepoMDError, e:
if not opts.quiet:
my.logger.error("Unable to fetch metadata: %s" % e)
remote_size = 0
if not opts.urls:
for pkg in download_list:
local = os.path.join(local_repo_path, pkg.remote_path)
sz = int(pkg.returnSimple('packagesize'))
if os.path.exists(local) and os.path.getsize(local) == sz:
continue
remote_size += sz
if hasattr(urlgrabber.progress, 'text_meter_total_size'):
urlgrabber.progress.text_meter_total_size(remote_size)
download_list.sort(key=lambda pkg: pkg.name)
if opts.urls:
for pkg in download_list:
local = os.path.join(local_repo_path, pkg.remote_path)
if not (os.path.exists(local) and my.verifyPkg(local, pkg, False)):
print urljoin(pkg.repo.urls[0], pkg.remote_path)
continue
# create dest dir
if not os.path.exists(local_repo_path):
os.makedirs(local_repo_path)
# set localpaths
for pkg in download_list:
pkg.localpath = os.path.join(local_repo_path, pkg.remote_path)
pkg.repo.copy_local = True
pkg.repo.cache = 0
localdir = os.path.dirname(pkg.localpath)
if not os.path.exists(localdir):
os.makedirs(localdir)
# use downloader from YumBase
probs = my.downloadPkgs(download_list)
if probs:
exit_code = 1
for key in probs:
for error in probs[key]:
my.logger.error('%s: %s', key, error)
if opts.gpgcheck:
for pkg in download_list:
result, error = my.sigCheckPkg(pkg)
if result != 0:
rpmfn = os.path.basename(pkg.remote_path)
if result == 1:
my.logger.warning('Removing %s, due to missing GPG key.' % rpmfn)
elif result == 2:
my.logger.warning('Removing %s due to failed signature check.' % rpmfn)
else:
my.logger.warning('Removing %s due to failed signature check: %s' % rpmfn)
os.unlink(pkg.localpath)
exit_code = 1
continue
my.closeRpmDB()
sys.exit(exit_code)
if __name__ == "__main__":
main()
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_115_1 |
crossvul-python_data_good_4657_0 | #!/usr/bin/env python3
# -*-coding:UTF-8 -*
"""
The ZMQ_Feed_Q Module
=====================
This module is consuming the Redis-list created by the ZMQ_Feed_Q Module,
And save the paste on disk to allow others modules to work on them.
..todo:: Be able to choose to delete or not the saved paste after processing.
..todo:: Store the empty paste (unprocessed) somewhere in Redis.
..note:: Module ZMQ_Something_Q and ZMQ_Something are closely bound, always put
the same Subscriber name in both of them.
Requirements
------------
*Need running Redis instances.
*Need the ZMQ_Feed_Q Module running to be able to work properly.
"""
import base64
import os
import time
import uuid
from pubsublogger import publisher
from Helper import Process
import magic
def rreplace(s, old, new, occurrence):
    """Replace up to *occurrence* rightmost occurrences of *old* in *s* with *new*."""
    # rsplit splits from the right at most `occurrence` times, so joining
    # the pieces with `new` rewrites only the rightmost matches.
    return new.join(s.rsplit(old, occurrence))
# Script body: consume feeder messages of the form '<path> <b64(gzip)>',
# sanitize the destination path, write each decoded paste under
# PASTES_FOLDER, and publish the relative path downstream.
if __name__ == '__main__':
    publisher.port = 6380
    publisher.channel = 'Script'

    processed_paste = 0
    time_1 = time.time()

    config_section = 'Global'

    p = Process(config_section)

    # get and sanityze PASTE DIRECTORY
    PASTES_FOLDER = os.path.join(os.environ['AIL_HOME'], p.config.get("Directories", "pastes"))
    PASTES_FOLDERS = PASTES_FOLDER + '/'
    PASTES_FOLDERS = os.path.join(os.path.realpath(PASTES_FOLDERS), '')

    # LOGGING #
    publisher.info("Feed Script started to receive & publish.")

    while True:

        message = p.get_from_set()
        # Recovering the streamed message informations.
        if message is not None:
            splitted = message.split()
            if len(splitted) == 2:
                paste, gzip64encoded = splitted
            else:
                # TODO Store the name of the empty paste inside a Redis-list.
                print("Empty Paste: not processed")
                publisher.debug("Empty Paste: {0} not processed".format(message))
                continue
        else:
            # idle: report throughput roughly every 30 seconds
            print("Empty Queues: Waiting...")
            if int(time.time() - time_1) > 30:
                to_print = 'Global; ; ; ;glob Processed {0} paste(s)'.format(processed_paste)
                print(to_print)
                #publisher.info(to_print)
                time_1 = time.time()
                processed_paste = 0
            time.sleep(1)
            continue

        # remove PASTES_FOLDER from item path (crawled item + submited)
        if PASTES_FOLDERS in paste:
            paste = paste.replace(PASTES_FOLDERS, '', 1)

        file_name_paste = paste.split('/')[-1]
        if len(file_name_paste)>255:
            # keep the leaf name under the common 255-byte filesystem limit
            new_file_name_paste = '{}{}.gz'.format(file_name_paste[:215], str(uuid.uuid4()))
            paste = rreplace(paste, file_name_paste, new_file_name_paste, 1)

        # Creating the full filepath
        filename = os.path.join(PASTES_FOLDER, paste)
        filename = os.path.realpath(filename)

        # incorrect filename
        # Path-traversal guard (CWE-22): realpath collapses '..' and
        # symlinks before the containment check below.
        # NOTE(review): os.path.commonprefix is character-wise, so a sibling
        # directory sharing the prefix (e.g. 'pastes2') would also pass;
        # consider comparing against PASTES_FOLDER + '/' -- verify.
        if not os.path.commonprefix([filename, PASTES_FOLDER]) == PASTES_FOLDER:
            print('Path traversal detected {}'.format(filename))
            publisher.warning('Global; Path traversal detected')
        else:

            dirname = os.path.dirname(filename)
            if not os.path.exists(dirname):
                os.makedirs(dirname)

            decoded = base64.standard_b64decode(gzip64encoded)

            with open(filename, 'wb') as f:
                f.write(decoded)
            '''try:
                decoded2 = gunzip_bytes_obj(decoded)
            except:
                decoded2 =''

            type = magic.from_buffer(decoded2, mime=True)

            if type!= 'text/x-c++' and type!= 'text/html' and type!= 'text/x-c' and type!= 'text/x-python' and type!= 'text/x-php' and type!= 'application/xml' and type!= 'text/x-shellscript' and type!= 'text/plain' and type!= 'text/x-diff' and type!= 'text/x-ruby':

                print('-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
                print(filename)
                print(type)
                print('-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
            '''
            p.populate_set_out(paste)
            processed_paste+=1
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_4657_0 |
crossvul-python_data_bad_3679_1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Copyright 2011, Piston Cloud Computing, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
Includes injection of SSH PGP keys into authorized_keys file.
"""
import crypt
import os
import random
import re
import tempfile
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import guestfs
from nova.virt.disk import loop
from nova.virt.disk import nbd
LOG = logging.getLogger(__name__)
# Configuration options controlling how guest disk images are mounted
# and prepared for file injection.
disk_opts = [
    cfg.StrOpt('injected_network_template',
               default='$pybasedir/nova/virt/interfaces.template',
               help='Template file for injected network'),
    cfg.ListOpt('img_handlers',
                default=['loop', 'nbd', 'guestfs'],
                help='Order of methods used to mount disk images'),

    # NOTE(yamahata): ListOpt won't work because the command may include a
    # comma. For example:
    #
    # mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16
    # --label %(fs_label)s %(target)s
    #
    # list arguments are comma separated and there is no way to
    # escape such commas.
    #
    cfg.MultiStrOpt('virt_mkfs',
                    default=[
                        'default=mkfs.ext3 -L %(fs_label)s -F %(target)s',
                        'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s',
                        'windows=mkfs.ntfs'
                        ' --force --fast --label %(fs_label)s %(target)s',
                        # NOTE(yamahata): vfat case
                        #'windows=mkfs.vfat -n %(fs_label)s %(target)s',
                        ],
                    help='mkfs commands for ephemeral device. '
                         'The format is <os_type>=<mkfs command>'),
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(disk_opts)

# Per-OS-type mkfs command templates, parsed out of the virt_mkfs option
# at import time.
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None

for s in FLAGS.virt_mkfs:
    # NOTE(yamahata): mkfs command may includes '=' for its options.
    # So item.partition('=') doesn't work here
    os_type, mkfs_command = s.split('=', 1)
    if os_type:
        _MKFS_COMMAND[os_type] = mkfs_command
    if os_type == 'default':
        _DEFAULT_MKFS_COMMAND = mkfs_command

# Extracts the byte count from 'qemu-img info' output, e.g.
# "virtual size: 1.0G (1073741824 bytes)".
_QEMU_VIRT_SIZE_REGEX = re.compile('^virtual size: (.*) \(([0-9]+) bytes\)',
                                   re.MULTILINE)
def mkfs(os_type, fs_label, target):
    """Format *target* with the mkfs command configured for *os_type*.

    Falls back to the 'default' template for unknown OS types; does
    nothing when no command is configured at all.
    """
    # NOTE(review): the template is %-interpolated against locals(), so
    # os_type/fs_label/target land in the command line unquoted — callers
    # must pass trusted labels and paths. TODO confirm call sites.
    mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or
                    '') % locals()
    if mkfs_command:
        utils.execute(*mkfs_command.split())
def get_image_virtual_size(image):
    """Return the virtual size of the disk *image* in bytes (via qemu-img)."""
    out, _err = utils.execute('qemu-img', 'info', image)
    # NOTE(review): assumes the "virtual size" line is always present in
    # qemu-img's output; a missing match would raise AttributeError below.
    m = _QEMU_VIRT_SIZE_REGEX.search(out)
    return int(m.group(2))
def resize2fs(image, check_exit_code=False):
    """fsck then grow the ext filesystem in *image* to fill the device.

    Exit codes are ignored by default — presumably because e2fsck
    returns non-zero even after a successful repair; verify before
    tightening.
    """
    utils.execute('e2fsck', '-fp', image, check_exit_code=check_exit_code)
    utils.execute('resize2fs', image, check_exit_code=check_exit_code)
def extend(image, size):
    """Increase image to size"""
    # NOTE(MotoKen): check image virtual size before resize
    virt_size = get_image_virtual_size(image)
    if virt_size >= size:
        # Never shrink: qemu-img resize with a smaller size would
        # truncate the image.
        return
    utils.execute('qemu-img', 'resize', image, size)
    # NOTE(vish): attempts to resize filesystem
    resize2fs(image)
def bind(src, target, instance_name):
    """Bind device to a filesytem"""
    if src:
        # Create the target node, then bind-mount the source device over it.
        utils.execute('touch', target, run_as_root=True)
        utils.execute('mount', '-o', 'bind', src, target,
                run_as_root=True)
        # Whitelist the device (by major:minor) in the LXC instance's
        # cgroup so the container may read/write/mknod it.
        s = os.stat(src)
        cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev),
                                         os.minor(s.st_rdev))
        cgroups_path = ("/sys/fs/cgroup/devices/libvirt/lxc/"
                        "%s/devices.allow" % instance_name)
        utils.execute('tee', cgroups_path,
                      process_input=cgroup_info, run_as_root=True)
def unbind(target):
    """Unmount a previously bind-mounted device node; no-op for a falsy target."""
    if target:
        utils.execute('umount', target, run_as_root=True)
class _DiskImage(object):
    """Provide operations on a disk image file.

    Wraps the configured mount handlers (loop / nbd / guestfs) and tries
    each in order until one succeeds; per-handler failures are collated
    in ``errors``.
    """

    def __init__(self, image, partition=None, use_cow=False, mount_dir=None):
        # These passed to each mounter
        self.image = image
        self.partition = partition
        self.mount_dir = mount_dir

        # Internal
        self._mkdir = False       # True when we created mount_dir ourselves
        self._mounter = None      # the handler that successfully mounted
        self._errors = []         # diagnostics from handlers that failed

        # As a performance tweak, don't bother trying to
        # directly loopback mount a cow image.
        self.handlers = FLAGS.img_handlers[:]
        if use_cow and 'loop' in self.handlers:
            self.handlers.remove('loop')

        if not self.handlers:
            msg = _('no capable image handler configured')
            raise exception.NovaException(msg)

    @property
    def errors(self):
        """Return the collated errors from all operations."""
        return '\n--\n'.join([''] + self._errors)

    @staticmethod
    def _handler_class(mode):
        """Look up the appropriate class to use based on MODE."""
        for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
            if cls.mode == mode:
                return cls
        msg = _("unknown disk image handler: %s") % mode
        raise exception.NovaException(msg)

    def mount(self):
        """Mount a disk image, using the object attributes.

        The first supported means provided by the mount classes is used.

        True, or False is returned and the 'errors' attribute
        contains any diagnostics.
        """
        if self._mounter:
            raise exception.NovaException(_('image already mounted'))

        if not self.mount_dir:
            self.mount_dir = tempfile.mkdtemp()
            self._mkdir = True

        try:
            # Try each configured handler in order; stop at the first
            # that mounts successfully.
            for h in self.handlers:
                mounter_cls = self._handler_class(h)
                mounter = mounter_cls(image=self.image,
                                      partition=self.partition,
                                      mount_dir=self.mount_dir)
                if mounter.do_mount():
                    self._mounter = mounter
                    break
                else:
                    LOG.debug(mounter.error)
                    self._errors.append(mounter.error)
        finally:
            if not self._mounter:
                self.umount()  # rmdir

        return bool(self._mounter)

    def umount(self):
        """Unmount a disk image from the file system."""
        try:
            if self._mounter:
                self._mounter.do_umount()
        finally:
            # Only remove the mount directory if we created it.
            if self._mkdir:
                os.rmdir(self.mount_dir)
# Public module functions
def inject_data(image,
                key=None, net=None, metadata=None, admin_password=None,
                partition=None, use_cow=False):
    """Injects a ssh key and optionally net data into a disk image.

    it will mount the image as a fully partitioned disk and attempt to inject
    into the specified partition number.

    If partition is not specified it mounts the image as a single partition.

    :raises: exception.NovaException when the image cannot be mounted.
    """
    img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if img.mount():
        try:
            inject_data_into_fs(img.mount_dir,
                                key, net, metadata, admin_password,
                                utils.execute)
        finally:
            # Always unmount, even if a particular injection failed.
            img.umount()
    else:
        raise exception.NovaException(img.errors)
def inject_files(image, files, partition=None, use_cow=False):
    """Injects arbitrary files into a disk image.

    :param files: iterable of (guest path, contents) pairs
    :raises: exception.NovaException when the image cannot be mounted.
    """
    img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if img.mount():
        try:
            for (path, contents) in files:
                _inject_file_into_fs(img.mount_dir, path, contents)
        finally:
            img.umount()
    else:
        raise exception.NovaException(img.errors)
def setup_container(image, container_dir=None, use_cow=False):
    """Setup the LXC container.

    It will mount the loopback image to the container directory in order
    to create the root filesystem for the container.

    LXC does not support qcow2 images yet.
    """
    # NOTE(review): on failure this logs and returns None (the exception
    # is swallowed) — callers must handle a None return. TODO confirm.
    try:
        img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
        if img.mount():
            return img
        else:
            raise exception.NovaException(img.errors)
    except Exception, exn:
        LOG.exception(_('Failed to mount filesystem: %s'), exn)
def destroy_container(img):
    """Destroy the container once it terminates.

    It will umount the container that is mounted,
    and delete any linked devices.

    LXC does not support qcow2 images yet.
    """
    # Best-effort teardown: errors are logged, never propagated.
    try:
        if img:
            img.umount()
    except Exception, exn:
        LOG.exception(_('Failed to remove container: %s'), exn)
def inject_data_into_fs(fs, key, net, metadata, admin_password, execute):
    """Injects data into a filesystem already mounted by the caller.

    Virt connections can call this directly if they mount their fs
    in a different way to inject_data.

    Each payload is optional; falsy arguments are skipped.
    """
    if key:
        _inject_key_into_fs(key, fs, execute=execute)
    if net:
        _inject_net_into_fs(net, fs, execute=execute)
    if metadata:
        _inject_metadata_into_fs(metadata, fs, execute=execute)
    if admin_password:
        _inject_admin_password_into_fs(admin_password, fs, execute=execute)
def _inject_file_into_fs(fs, path, contents):
    """Write *contents* to *path* interpreted relative to the guest root *fs*.

    The destination is canonicalised and verified to stay inside *fs*:
    merely stripping leading slashes does not stop '..' components or
    symlinks inside the image from escaping the mounted guest filesystem
    and overwriting host files (path traversal, CWE-22).

    :raises: exception.NovaException when the path escapes *fs*.
    """
    absolute_path = os.path.join(fs, path.lstrip('/'))
    # Resolve '..' components and any symlinks in the existing prefix
    # before the containment check; comparing the raw join would let a
    # crafted path or in-image symlink escape the guest root.
    real_path = os.path.realpath(absolute_path)
    # Trailing separator so '/mnt/guestx' does not pass for root '/mnt/guest'.
    fs_root = os.path.join(os.path.realpath(fs), '')
    if not real_path.startswith(fs_root):
        raise exception.NovaException(
            _('injected file path %s escapes the image filesystem') % path)
    parent_dir = os.path.dirname(real_path)
    utils.execute('mkdir', '-p', parent_dir, run_as_root=True)
    utils.execute('tee', real_path, process_input=contents,
                  run_as_root=True)
def _inject_metadata_into_fs(metadata, fs, execute=None):
    """Serialise instance *metadata* items to <fs>/meta.js as JSON.

    *metadata* is an iterable of objects exposing .key and .value.
    The *execute* parameter is unused here; writes go through
    utils.execute with root privileges.
    """
    metadata_path = os.path.join(fs, "meta.js")
    metadata = dict([(m.key, m.value) for m in metadata])
    utils.execute('tee', metadata_path,
                  process_input=jsonutils.dumps(metadata), run_as_root=True)
def _inject_key_into_fs(key, fs, execute=None):
    """Add the given public ssh key to root's authorized_keys.

    key is an ssh key string.
    fs is the path to the base of the filesystem into which to inject the key.
    """
    sshdir = os.path.join(fs, 'root', '.ssh')
    # Ensure ~root/.ssh exists with the permissions sshd requires.
    utils.execute('mkdir', '-p', sshdir, run_as_root=True)
    utils.execute('chown', 'root', sshdir, run_as_root=True)
    utils.execute('chmod', '700', sshdir, run_as_root=True)
    keyfile = os.path.join(sshdir, 'authorized_keys')
    key_data = [
        '\n',
        '# The following ssh key was injected by Nova',
        '\n',
        key.strip(),
        '\n',
    ]
    # Append ('-a') so existing authorized keys are preserved.
    utils.execute('tee', '-a', keyfile,
                  process_input=''.join(key_data), run_as_root=True)
def _inject_net_into_fs(net, fs, execute=None):
    """Inject /etc/network/interfaces into the filesystem rooted at fs.

    net is the contents of /etc/network/interfaces.
    """
    netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
    utils.execute('mkdir', '-p', netdir, run_as_root=True)
    utils.execute('chown', 'root:root', netdir, run_as_root=True)
    # The int 755 is stringified to the chmod argument "755" by execute.
    utils.execute('chmod', 755, netdir, run_as_root=True)
    netfile = os.path.join(netdir, 'interfaces')
    utils.execute('tee', netfile, process_input=net, run_as_root=True)
def _inject_admin_password_into_fs(admin_passwd, fs, execute=None):
    """Set the root password to admin_passwd

    admin_password is a root password
    fs is the path to the base of the filesystem into which to inject
    the key.

    This method modifies the instance filesystem directly,
    and does not require a guest agent running in the instance.
    """
    # The approach used here is to copy the password and shadow
    # files from the instance filesystem to local files, make any
    # necessary changes, and then copy them back.

    admin_user = 'root'

    fd, tmp_passwd = tempfile.mkstemp()
    os.close(fd)
    fd, tmp_shadow = tempfile.mkstemp()
    os.close(fd)

    # Copy out with root privileges (guest files are root-owned),
    # edit locally, then copy back.
    utils.execute('cp', os.path.join(fs, 'etc', 'passwd'), tmp_passwd,
                  run_as_root=True)
    utils.execute('cp', os.path.join(fs, 'etc', 'shadow'), tmp_shadow,
                  run_as_root=True)
    _set_passwd(admin_user, admin_passwd, tmp_passwd, tmp_shadow)
    utils.execute('cp', tmp_passwd, os.path.join(fs, 'etc', 'passwd'),
                  run_as_root=True)
    os.unlink(tmp_passwd)
    utils.execute('cp', tmp_shadow, os.path.join(fs, 'etc', 'shadow'),
                  run_as_root=True)
    os.unlink(tmp_shadow)
def _set_passwd(username, admin_passwd, passwd_file, shadow_file):
    """set the password for username to admin_passwd

    The passwd_file is not modified.  The shadow_file is updated.
    if the username is not found in both files, an exception is raised.

    :param username: the username
    :param admin_passwd: the plaintext password to set
    :param passwd_file: path to the passwd file
    :param shadow_file: path to the shadow password file
    :returns: nothing
    :raises: exception.NovaException(), IOError()

    """
    salt_set = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789./')
    # encryption algo - id pairs for crypt()
    algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''}

    salt = ''.join([random.choice(salt_set) for _i in range(16)])

    # crypt() depends on the underlying libc, and may not support all
    # forms of hash. We try md5 first. If we get only 13 characters back,
    # then the underlying crypt() didn't understand the '$n$salt' magic,
    # so we fall back to DES.
    # md5 is the default because it's widely supported. Although the
    # local crypt() might support stronger SHA, the target instance
    # might not.
    encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt)
    if len(encrypted_passwd) == 13:
        encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt)

    # username MUST exist in the passwd file or it's an error.
    # (Each open is guarded separately: the previous single try/finally
    # raised NameError when the first open itself failed.)
    p_file = open(passwd_file, 'rb')
    try:
        found = any(entry.split(':')[0] == username for entry in p_file)
    finally:
        p_file.close()
    if not found:
        msg = _('User %(username)s not found in password file.')
        # The template uses a named placeholder, so interpolation needs a
        # mapping; interpolating the bare string raised TypeError instead
        # of the intended NovaException.
        raise exception.NovaException(msg % {'username': username})

    # Update the password in the shadow file. It's an error if the
    # user doesn't exist there either.
    new_shadow = list()
    found = False
    s_file = open(shadow_file, 'rb')
    try:
        for entry in s_file:
            split_entry = entry.split(':')
            if split_entry[0] == username:
                split_entry[1] = encrypted_passwd
                found = True
            new_shadow.append(':'.join(split_entry))
    finally:
        s_file.close()
    if not found:
        msg = _('User %(username)s not found in shadow file.')
        raise exception.NovaException(msg % {'username': username})

    # Entries keep their trailing newlines from iteration, so they can be
    # written back verbatim.
    s_file = open(shadow_file, 'wb')
    try:
        s_file.writelines(new_shadow)
    finally:
        s_file.close()
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_3679_1 |
crossvul-python_data_bad_4110_3 | import re
import stringcase
def snake_case(value: str) -> str:
    """Convert *value* to snake_case for use in generated identifiers.

    Characters other than word characters, spaces, hyphens and underscores
    are stripped first: these names end up in generated module and file
    names, so letting '/', '.' or other punctuation through allows path
    traversal and code injection via a malicious OpenAPI document (CWE-22).
    """
    value = re.sub(r"[^\w _-]+", "", value)
    # Fold runs of capitals so e.g. "HTTPResponse" -> "HttpResponse".
    value = re.sub(r"([A-Z]{2,})([A-Z][a-z]|[ -_]|$)", lambda m: m.group(1).title() + m.group(2), value.strip())
    # Lower-case a capital at the start of the string or after a separator.
    value = re.sub(r"(^|[ _-])([A-Z])", lambda m: m.group(1) + m.group(2).lower(), value)
    return stringcase.snakecase(value)
def pascal_case(value: str) -> str:
    """Convert *value* to PascalCase, stripping characters that could be
    abused for path traversal or code injection in generated names (CWE-22)."""
    return stringcase.pascalcase(re.sub(r"[^\w _-]+", "", value))
def spinal_case(value: str) -> str:
    """Convert *value* to spinal-case, stripping characters that could be
    abused for path traversal or code injection in generated names (CWE-22)."""
    return stringcase.spinalcase(re.sub(r"[^\w _-]+", "", value))
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_4110_3 |
crossvul-python_data_good_495_2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
MANY_SLASHES_PATTERN = r'[\/]+'
MANY_SLASHES_REGEX = re.compile(MANY_SLASHES_PATTERN)
def lenient_decode(value, encoding=None):
    """
    Decode an encoded string and convert it to an unicode string.
    Undecodable bytes are silently dropped ('ignore' error handler).

    Args:
        value: input value
        encoding: string encoding, defaults to utf-8
    Returns:
        (unicode) decoded value

    >>> lenient_decode("Hallo")
    u'Hallo'
    >>> lenient_decode(u"Hallo")
    u'Hallo'
    >>> lenient_decode("HällöÜ")
    u'H\\xe4ll\\xf6\\xdc'
    """
    # Already unicode: nothing to decode. (Python 2 only: 'unicode' builtin.)
    if isinstance(value, unicode):
        return value

    if encoding is None:
        encoding = 'utf_8'

    return value.decode(encoding, 'ignore')
def lenient_force_utf_8(value):
    """
    Coerce *value* (str or unicode) to a UTF-8 encoded byte string.

    Args:
        value: input value
    Returns:
        (basestring) utf-8 encoded value

    >>> isinstance(lenient_force_utf_8(''), basestring)
    True
    >>> lenient_force_utf_8(u"Hallo")
    'Hallo'
    >>> lenient_force_utf_8("HällöÜ")
    'H\\xc3\\xa4ll\\xc3\\xb6\\xc3\\x9c'
    """
    return lenient_decode(value).encode('utf_8')
def sanitise_filename_slashes(value):
    """
    Collapse every run of consecutive slashes in *value* to a single '/'.

    Args:
        value: input value
    Returns:
        value w/o multiple slashes

    >>> in_value = "///tmp/x/y/z"
    >>> expected = re.sub("^/+", "/", "///tmp/x/y/z")
    >>> sanitise_filename_slashes(in_value) == expected
    True
    """
    return MANY_SLASHES_REGEX.sub('/', value)
if __name__ == '__main__':
    import doctest
    # NOTE(review): doctest.testmod() returns (failed, attempted), so
    # 'SUCCEEDED' actually holds the attempted count, not the successes.
    (FAILED, SUCCEEDED) = doctest.testmod()
    print("[doctest] SUCCEEDED/FAILED: {:d}/{:d}".format(SUCCEEDED, FAILED))
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_495_2 |
crossvul-python_data_good_4110_3 | import re
import stringcase
def _sanitize(value: str) -> str:
return re.sub(r"[^\w _-]+", "", value)
def group_title(value: str) -> str:
    """Normalise capitalisation: fold acronym runs into title-cased words,
    then lower-case a capital at the start or right after a separator."""
    stripped = value.strip()

    def _fold_acronym(match):
        # "HTTPResponse" -> "HttpResponse"
        return match.group(1).title() + match.group(2)

    def _lower_lead(match):
        return match.group(1) + match.group(2).lower()

    stripped = re.sub(
        r"([A-Z]{2,})([A-Z][a-z]|[ -_]|$)", _fold_acronym, stripped
    )
    return re.sub(r"(^|[ _-])([A-Z])", _lower_lead, stripped)
def snake_case(value: str) -> str:
    """Return *value* as snake_case, after sanitising and acronym folding."""
    cleaned = group_title(_sanitize(value))
    return stringcase.snakecase(cleaned)
def pascal_case(value: str) -> str:
    """Return *value* as PascalCase, after stripping unsafe characters."""
    cleaned = _sanitize(value)
    return stringcase.pascalcase(cleaned)
def kebab_case(value: str) -> str:
    """Return *value* as kebab-case, after sanitising and acronym folding."""
    cleaned = group_title(_sanitize(value))
    return stringcase.spinalcase(cleaned)
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_4110_3 |
crossvul-python_data_bad_4180_0 | ########################################################################
# File name: xhu.py
# This file is part of: xmpp-http-upload
#
# LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
import contextlib
import errno
import fnmatch
import json
import hashlib
import hmac
import pathlib
import typing
import flask
app = flask.Flask("xmpp-http-upload")
# Configuration file location comes from the environment so one codebase
# can serve multiple deployments.
app.config.from_envvar("XMPP_HTTP_UPLOAD_CONFIG")
application = app

if app.config['ENABLE_CORS']:
    # Optional dependency: only imported when CORS support is enabled.
    from flask_cors import CORS
    CORS(app)
def sanitized_join(path: str, root: pathlib.Path) -> pathlib.Path:
    """Join *path* onto *root*, refusing any result outside *root*.

    The previous implementation used ``Path.absolute()``, which does NOT
    collapse ``..`` components, so a request path such as
    ``../../etc/passwd`` passed the prefix check while escaping the data
    root (path traversal, CWE-22).  ``Path.resolve()`` normalises ``..``
    and symlinks before the containment check.

    :raises ValueError: if the resolved path escapes *root*.
    """
    base = root.resolve()
    result = (base / path).resolve()
    # Prefix check with a trailing separator so sibling directories such
    # as root + "x" are not accepted; root itself is also rejected, as
    # before.
    if not str(result).startswith(str(base) + "/"):
        raise ValueError("resulting path is outside root")
    return result
def get_paths(base_path: pathlib.Path):
    """Return the (payload, metadata) file pair derived from *base_path*."""
    base = str(base_path)
    return pathlib.Path(base + ".data"), pathlib.Path(base + ".meta")
def load_metadata(metadata_file):
    """Parse and return the JSON document stored in *metadata_file*."""
    with metadata_file.open("r") as handle:
        return json.load(handle)
def get_info(path: str, root: pathlib.Path) -> typing.Tuple[
        pathlib.Path,
        dict]:
    """Resolve *path* under *root* and return (data file, parsed metadata).

    Fix: the *root* argument was previously ignored in favour of
    re-reading app.config["DATA_ROOT"]; every caller already passes
    exactly that value, so honouring the parameter is
    behaviour-preserving and stops the signature from lying.

    :raises ValueError: if *path* escapes *root* (from sanitized_join).
    :raises OSError: if the metadata file cannot be read.
    """
    dest_path = sanitized_join(path, root)
    data_file, metadata_file = get_paths(dest_path)
    return data_file, load_metadata(metadata_file)
@contextlib.contextmanager
def write_file(at: pathlib.Path):
    """Context manager: exclusively create *at* for binary writing.

    The partially written file is removed when the body raises, so a
    failed upload never leaves a stale payload behind.
    """
    stream = at.open("xb")
    try:
        yield stream
    except BaseException:
        stream.close()
        at.unlink()
        raise
    else:
        stream.close()
@app.route("/")
def index():
    """Landing endpoint; serves a static plain-text greeting."""
    return flask.Response(
        "Welcome to XMPP HTTP Upload. State your business.",
        mimetype="text/plain",
    )
def stream_file(src, dest, nbytes):
    """Copy exactly *nbytes* bytes from *src* to *dest* in 4 KiB chunks.

    :raises EOFError: when *src* is exhausted before *nbytes* bytes arrive.
    """
    remaining = nbytes
    while remaining > 0:
        chunk = src.read(min(remaining, 4096))
        if not chunk:
            raise EOFError
        dest.write(chunk)
        remaining -= len(chunk)
@app.route("/<path:path>", methods=["PUT"])
def put_file(path):
    """Accept an upload, authorised by an HMAC over the path and length."""
    try:
        dest_path = sanitized_join(
            path,
            pathlib.Path(app.config["DATA_ROOT"]),
        )
    except ValueError:
        # Path escapes the data root; answer 404 rather than 403 so
        # nothing outside the root is confirmed to exist.
        return flask.Response(
            "Not Found",
            404,
            mimetype="text/plain",
        )

    verification_key = flask.request.args.get("v", "")
    length = int(flask.request.headers.get("Content-Length", 0))
    # The XMPP server signed "<path> <length>"; recompute and compare.
    hmac_input = "{} {}".format(path, length).encode("utf-8")
    key = app.config["SECRET_KEY"]
    mac = hmac.new(key, hmac_input, hashlib.sha256)
    digest = mac.hexdigest()
    # Constant-time comparison avoids leaking the digest via timing.
    if not hmac.compare_digest(digest, verification_key):
        return flask.Response(
            "Invalid verification key",
            403,
            mimetype="text/plain",
        )

    content_type = flask.request.headers.get(
        "Content-Type",
        "application/octet-stream",
    )

    dest_path.parent.mkdir(parents=True, exist_ok=True, mode=0o770)
    data_file, metadata_file = get_paths(dest_path)

    try:
        # Exclusive create: a second PUT to the same slot raises EEXIST.
        with write_file(data_file) as fout:
            stream_file(flask.request.stream, fout, length)

        with metadata_file.open("x") as f:
            json.dump(
                {
                    "headers": {"Content-Type": content_type},
                },
                f,
            )
    except EOFError:
        # Body shorter than the declared Content-Length.
        return flask.Response(
            "Bad Request",
            400,
            mimetype="text/plain",
        )
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return flask.Response(
                "Conflict",
                409,
                mimetype="text/plain",
            )
        raise

    return flask.Response(
        "Created",
        201,
        mimetype="text/plain",
    )
def generate_headers(response_headers, metadata_headers):
    """Copy stored metadata headers onto a response and harden it.

    Unless the stored Content-Type matches one of the configured
    NON_ATTACHMENT_MIME_TYPES globs, the file is forced to download as
    an attachment; the remaining headers stop content sniffing, framing
    and active-content execution for whatever is served inline.
    """
    for key, value in metadata_headers.items():
        response_headers[key] = value

    content_type = metadata_headers["Content-Type"]
    for mimetype_glob in app.config.get("NON_ATTACHMENT_MIME_TYPES", []):
        if fnmatch.fnmatch(content_type, mimetype_glob):
            break
    else:
        # for/else: no glob matched, so serve as a download.
        response_headers["Content-Disposition"] = "attachment"

    response_headers["X-Content-Type-Options"] = "nosniff"
    response_headers["X-Frame-Options"] = "DENY"
    response_headers["Content-Security-Policy"] = "default-src 'none'; frame-ancestors 'none'; sandbox"
@app.route("/<path:path>", methods=["HEAD"])
def head_file(path):
    """Serve size and stored headers for an uploaded file, without its body."""
    try:
        data_file, metadata = get_info(
            path,
            pathlib.Path(app.config["DATA_ROOT"])
        )
        stat = data_file.stat()
    except (OSError, ValueError):
        # Missing file and a path escaping the data root both map to 404.
        return flask.Response(
            "Not Found",
            404,
            mimetype="text/plain",
        )

    response = flask.Response()
    response.headers["Content-Length"] = str(stat.st_size)
    generate_headers(
        response.headers,
        metadata["headers"],
    )
    return response
@app.route("/<path:path>", methods=["GET"])
def get_file(path):
    """Serve an uploaded file with its stored and hardening headers."""
    try:
        data_file, metadata = get_info(
            path,
            pathlib.Path(app.config["DATA_ROOT"])
        )
    except (OSError, ValueError):
        # Missing file and a path escaping the data root both map to 404.
        return flask.Response(
            "Not Found",
            404,
            mimetype="text/plain",
        )

    response = flask.make_response(flask.send_file(
        str(data_file),
    ))
    generate_headers(
        response.headers,
        metadata["headers"],
    )
    return response
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_4180_0 |
crossvul-python_data_bad_2815_0 | # -*- coding: utf-8 -*-
'''
A few checks to make sure the environment is sane
'''
from __future__ import absolute_import
# Original Author: Jeff Schroeder <jeffschroeder@computer.org>
# Import python libs
import os
import re
import sys
import stat
import errno
import socket
import logging
# Import third party libs
try:
import win32file
except ImportError:
import resource
# Import salt libs
from salt.log import is_console_configured
from salt.log.setup import LOG_LEVELS
from salt.exceptions import SaltClientError, SaltSystemExit, \
CommandExecutionError
import salt.defaults.exitcodes
import salt.utils
log = logging.getLogger(__name__)
def zmq_version():
    '''
    ZeroMQ python bindings >= 2.1.9 are required

    Returns True when the bindings are acceptable (or absent — local
    mode), False when they are known-bad.
    '''
    try:
        import zmq
    except Exception:
        # Return True for local mode
        return True
    ver = zmq.__version__
    # The last matched group can be None if the version
    # is something like 3.1 and that will work properly
    match = re.match(r'^(\d+)\.(\d+)(?:\.(\d+))?', ver)

    # Fallthrough and hope for the best
    if not match:
        msg = "Using untested zmq python bindings version: '{0}'".format(ver)
        if is_console_configured():
            log.warning(msg)
        else:
            sys.stderr.write("WARNING {0}\n".format(msg))
        return True

    major, minor, point = match.groups()

    if major.isdigit():
        major = int(major)
    if minor.isdigit():
        minor = int(minor)

    # point very well could be None
    if point and point.isdigit():
        point = int(point)

    if major == 2 and minor == 1:
        # zmq 2.1dev could be built against a newer libzmq
        if "dev" in ver and not point:
            msg = 'Using dev zmq module, please report unexpected results'
            if is_console_configured():
                log.warning(msg)
            else:
                sys.stderr.write("WARNING: {0}\n".format(msg))
            return True
        elif point and point >= 9:
            return True
    elif major > 2 or (major == 2 and minor > 1):
        return True

    # If all else fails, gracefully croak and warn the user
    log.critical('ZeroMQ python bindings >= 2.1.9 are required')
    if 'salt-master' in sys.argv[0]:
        msg = ('The Salt Master is unstable using a ZeroMQ version '
               'lower than 2.1.11 and requires this fix: http://lists.zeromq.'
               'org/pipermail/zeromq-dev/2011-June/012094.html')
        if is_console_configured():
            log.critical(msg)
        else:
            sys.stderr.write('CRITICAL {0}\n'.format(msg))
    return False
def lookup_family(hostname):
    '''
    Lookup a hostname and determine its address family. The first address returned
    will be AF_INET6 if the system is IPv6-enabled, and AF_INET otherwise.
    '''
    # Fall back to IPv4 when resolution fails or yields nothing.
    fallback = socket.AF_INET
    try:
        results = socket.getaddrinfo(
            hostname or None, None, socket.AF_UNSPEC, socket.SOCK_STREAM
        )
    except socket.gaierror:
        return fallback
    if not results:
        return fallback
    return results[0][0]
def verify_socket(interface, pub_port, ret_port):
    '''
    Attempt to bind to the sockets to verify that they are available

    Returns True when both ports could be bound, False otherwise.
    '''
    addr_family = lookup_family(interface)
    for port in pub_port, ret_port:
        sock = socket.socket(addr_family, socket.SOCK_STREAM)
        try:
            # SO_REUSEADDR so a socket lingering in TIME_WAIT does not
            # produce a false negative.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind((interface, int(port)))
        except Exception as exc:
            msg = 'Unable to bind socket {0}:{1}'.format(interface, port)
            if exc.args:
                msg = '{0}, error: {1}'.format(msg, str(exc))
            else:
                msg = '{0}, this might not be a problem.'.format(msg)
            msg += '; Is there another salt-master running?'
            if is_console_configured():
                log.warning(msg)
            else:
                sys.stderr.write('WARNING: {0}\n'.format(msg))
            return False
        finally:
            sock.close()
    return True
def verify_files(files, user):
    '''
    Verify that the named files exist and are owned by the named user

    Missing parent directories and files are created; existing files
    owned by someone else are (best-effort) chowned to *user*.
    '''
    if salt.utils.is_windows():
        return True
    import pwd  # after confirming not running Windows
    try:
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
    except KeyError:
        err = ('Failed to prepare the Salt environment for user '
               '{0}. The user is not available.\n').format(user)
        sys.stderr.write(err)
        sys.exit(salt.defaults.exitcodes.EX_NOUSER)
    for fn_ in files:
        dirname = os.path.dirname(fn_)
        try:
            if dirname:
                try:
                    os.makedirs(dirname)
                except OSError as err:
                    # Directory already existing is fine.
                    if err.errno != errno.EEXIST:
                        raise
            if not os.path.isfile(fn_):
                with salt.utils.fopen(fn_, 'w+') as fp_:
                    fp_.write('')

        except IOError as err:
            if os.path.isfile(dirname):
                msg = 'Failed to create path {0}, is {1} a file?'.format(fn_, dirname)
                raise SaltSystemExit(msg=msg)
            if err.errno != errno.EACCES:
                raise
            msg = 'No permissions to access "{0}", are you running as the correct user?'.format(fn_)
            raise SaltSystemExit(msg=msg)

        except OSError as err:
            msg = 'Failed to create path "{0}" - {1}'.format(fn_, err)
            raise SaltSystemExit(msg=msg)

        stats = os.stat(fn_)
        if uid != stats.st_uid:
            try:
                os.chown(fn_, uid, -1)
            except OSError:
                # Best effort: lacking privileges to chown is tolerated.
                pass
    return True
def verify_env(dirs, user, permissive=False, pki_dir='', skip_extra=False):
    '''
    Verify that the named directories are in place and that the environment
    can shake the salt
    '''
    if salt.utils.is_windows():
        return win_verify_env(dirs, permissive, pki_dir, skip_extra)
    import pwd  # after confirming not running Windows
    try:
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
        gid = pwnam[3]
        groups = salt.utils.get_gid_list(user, include_default=False)
    except KeyError:
        err = ('Failed to prepare the Salt environment for user '
               '{0}. The user is not available.\n').format(user)
        sys.stderr.write(err)
        sys.exit(salt.defaults.exitcodes.EX_NOUSER)
    for dir_ in dirs:
        if not dir_:
            continue
        if not os.path.isdir(dir_):
            try:
                # NOTE(review): 18 decimal == 0o022, so the trailing
                # comment documented the wrong mask.
                cumask = os.umask(18)  # 0o022
                os.makedirs(dir_)
                # If starting the process as root, chown the new dirs
                if os.getuid() == 0:
                    os.chown(dir_, uid, gid)
                os.umask(cumask)
            except OSError as err:
                msg = 'Failed to create directory path "{0}" - {1}\n'
                sys.stderr.write(msg.format(dir_, err))
                sys.exit(err.errno)

        mode = os.stat(dir_)
        # If starting the process as root, chown the new dirs
        if os.getuid() == 0:
            fmode = os.stat(dir_)
            if fmode.st_uid != uid or fmode.st_gid != gid:
                if permissive and fmode.st_gid in groups:
                    # Allow the directory to be owned by any group root
                    # belongs to if we say it's ok to be permissive
                    pass
                else:
                    # chown the file for the new user
                    os.chown(dir_, uid, gid)
            # Skip the (potentially huge) job cache subtrees entirely.
            for subdir in [a for a in os.listdir(dir_) if 'jobs' not in a]:
                fsubdir = os.path.join(dir_, subdir)
                if '{0}jobs'.format(os.path.sep) in fsubdir:
                    continue
                # NOTE(review): this loop variable shadows the 'dirs'
                # parameter; harmless here since the parameter is not
                # used again, but worth renaming eventually.
                for root, dirs, files in os.walk(fsubdir):
                    for name in files:
                        if name.startswith('.'):
                            continue
                        path = os.path.join(root, name)
                        try:
                            fmode = os.stat(path)
                        except (IOError, OSError):
                            # NOTE(review): on a failed stat 'fmode'
                            # keeps its previous value and is still used
                            # below — latent bug; TODO confirm upstream.
                            pass
                        if fmode.st_uid != uid or fmode.st_gid != gid:
                            if permissive and fmode.st_gid in groups:
                                pass
                            else:
                                # chown the file for the new user
                                os.chown(path, uid, gid)
                    for name in dirs:
                        path = os.path.join(root, name)
                        fmode = os.stat(path)
                        if fmode.st_uid != uid or fmode.st_gid != gid:
                            if permissive and fmode.st_gid in groups:
                                pass
                            else:
                                # chown the file for the new user
                                os.chown(path, uid, gid)
        # Allow the pki dir to be 700 or 750, but nothing else.
        # This prevents other users from writing out keys, while
        # allowing the use-case of 3rd-party software (like django)
        # to read in what it needs to integrate.
        #
        # If the permissions aren't correct, default to the more secure 700.
        # If acls are enabled, the pki_dir needs to remain readable, this
        # is still secure because the private keys are still only readable
        # by the user running the master
        if dir_ == pki_dir:
            smode = stat.S_IMODE(mode.st_mode)
            # 448 == 0o700, 488 == 0o750
            if smode != 448 and smode != 488:
                if os.access(dir_, os.W_OK):
                    os.chmod(dir_, 448)
                else:
                    msg = 'Unable to securely set the permissions of "{0}".'
                    msg = msg.format(dir_)
                    if is_console_configured():
                        log.critical(msg)
                    else:
                        sys.stderr.write("CRITICAL: {0}\n".format(msg))
    if skip_extra is False:
        # Run the extra verification checks
        zmq_version()
def check_user(user):
    '''
    Check user and assign process uid/gid.

    Returns True when the process is (or could be switched to) *user*,
    False when the switch failed or the user does not exist.
    '''
    if salt.utils.is_windows():
        return True
    if user == salt.utils.get_user():
        return True
    import pwd  # after confirming not running Windows
    try:
        pwuser = pwd.getpwnam(user)
        try:
            if hasattr(os, 'initgroups'):
                os.initgroups(user, pwuser.pw_gid)  # pylint: disable=minimum-python-version
            else:
                os.setgroups(salt.utils.get_gid_list(user, include_default=False))
            os.setgid(pwuser.pw_gid)
            os.setuid(pwuser.pw_uid)

            # We could just reset the whole environment but let's just override
            # the variables we can get from pwuser
            if 'HOME' in os.environ:
                os.environ['HOME'] = pwuser.pw_dir

            if 'SHELL' in os.environ:
                os.environ['SHELL'] = pwuser.pw_shell

            for envvar in ('USER', 'LOGNAME'):
                if envvar in os.environ:
                    os.environ[envvar] = pwuser.pw_name

        except OSError:
            msg = 'Salt configured to run as user "{0}" but unable to switch.'
            msg = msg.format(user)
            if is_console_configured():
                log.critical(msg)
            else:
                sys.stderr.write("CRITICAL: {0}\n".format(msg))
            return False
    except KeyError:
        msg = 'User not found: "{0}"'.format(user)
        if is_console_configured():
            log.critical(msg)
        else:
            sys.stderr.write("CRITICAL: {0}\n".format(msg))
        return False
    return True
def list_path_traversal(path):
    '''
    Returns a full list of directories leading up to, and including, a path.

    So list_path_traversal('/path/to/salt') would return:
        ['/', '/path', '/path/to', '/path/to/salt']
    in that order.

    This routine has been tested on Windows systems as well.
    list_path_traversal('c:\\path\\to\\salt') on Windows would return:
        ['c:\\', 'c:\\path', 'c:\\path\\to', 'c:\\path\\to\\salt']
    '''
    out = [path]
    head, tail = os.path.split(path)
    if not tail:
        # A trailing separator yields an empty tail; start from the parent
        # so the path is not listed twice.
        out = [head]
        head, tail = os.path.split(head)
    # Walk upward until splitting no longer changes the head (the root).
    while head != out[0]:
        out.insert(0, head)
        head, tail = os.path.split(head)
    return out
def check_path_traversal(path, user='root', skip_perm_errors=False):
    '''
    Walk from the root up to a directory and verify that the current
    user has access to read each directory. This is used for making
    sure a user can read all parent directories of the minion's key
    before trying to go and generate a new key and raising an IOError
    '''
    for tpath in list_path_traversal(path):
        if not os.access(tpath, os.R_OK):
            msg = 'Could not access {0}.'.format(tpath)
            if not os.path.exists(tpath):
                msg += ' Path does not exist.'
            else:
                current_user = salt.utils.get_user()
                # Make the error message more intelligent based on how
                # the user invokes salt-call or whatever other script.
                if user != current_user:
                    msg += ' Try running as user {0}.'.format(user)
                else:
                    msg += ' Please give {0} read permissions.'.format(user)

            # We don't need to bail on config file permission errors
            # if the CLI
            # process is run with the -a flag
            if skip_perm_errors:
                return
            # Propagate this exception up so there isn't a sys.exit()
            # in the middle of code that could be imported elsewhere.
            raise SaltClientError(msg)
def check_max_open_files(opts):
    '''
    Check the number of max allowed open files and adjust if needed

    Logs at increasing severity as the accepted-minion count approaches
    the soft file-descriptor limit.
    '''
    mof_c = opts.get('max_open_files', 100000)
    if sys.platform.startswith('win'):
        # Check the Windows API for more detail on this
        # http://msdn.microsoft.com/en-us/library/xt874334(v=vs.71).aspx
        # and the python binding http://timgolden.me.uk/pywin32-docs/win32file.html
        mof_s = mof_h = win32file._getmaxstdio()
    else:
        mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)

    accepted_keys_dir = os.path.join(opts.get('pki_dir'), 'minions')
    accepted_count = len(os.listdir(accepted_keys_dir))

    log.debug(
        'This salt-master instance has accepted {0} minion keys.'.format(
            accepted_count
        )
    )

    level = logging.INFO

    if (accepted_count * 4) <= mof_s:
        # We check for the soft value of max open files here because that's the
        # value the user chose to raise to.
        #
        # The number of accepted keys multiplied by four(4) is lower than the
        # soft value, everything should be OK
        return

    msg = (
        'The number of accepted minion keys({0}) should be lower than 1/4 '
        'of the max open files soft setting({1}). '.format(
            accepted_count, mof_s
        )
    )

    if accepted_count >= mof_s:
        # This should never occur, it might have already crashed
        msg += 'salt-master will crash pretty soon! '
        level = logging.CRITICAL
    elif (accepted_count * 2) >= mof_s:
        # This is way too low, CRITICAL
        level = logging.CRITICAL
    elif (accepted_count * 3) >= mof_s:
        level = logging.WARNING
        # The accepted count is more than 3 time, WARN
    elif (accepted_count * 4) >= mof_s:
        level = logging.INFO

    if mof_c < mof_h:
        msg += ('According to the system\'s hard limit, there\'s still a '
                'margin of {0} to raise the salt\'s max_open_files '
                'setting. ').format(mof_h - mof_c)

    msg += 'Please consider raising this value.'
    log.log(level=level, msg=msg)
def clean_path(root, path, subdir=False):
    '''
    Accepts the root the path needs to be under and verifies that the path is
    under said root. Pass in subdir=True if the path can result in a
    subdirectory of the root instead of having to reside directly in the
    root.

    Returns the normalized path when it is confined to root, otherwise ''.
    '''
    if not os.path.isabs(root):
        return ''
    # Normalize the root too so the comparisons below are canonical.
    root = os.path.normpath(root)
    if not os.path.isabs(path):
        path = os.path.join(root, path)
    path = os.path.normpath(path)
    if subdir:
        # SECURITY (CWE-22): compare against root plus a separator so that
        # a sibling directory such as '/rootdir-evil' can no longer satisfy
        # a bare startswith('/rootdir') test. rstrip keeps root='/' working.
        if path == root or path.startswith(root.rstrip(os.sep) + os.sep):
            return path
    else:
        if os.path.dirname(path) == root:
            return path
    return ''
def clean_id(id_):
    '''
    Returns if the passed id is clean.

    An id is clean when it is a string that contains no parent-directory
    traversal sequence ('..' followed by the path separator).
    '''
    # Non-string ids (None, ints, lists, ...) can never be clean; the
    # previous implementation raised TypeError from re.search instead,
    # forcing every caller to catch it.
    if not isinstance(id_, str):
        return False
    if re.search(r'\.\.\{sep}'.format(sep=os.sep), id_):
        return False
    return True
def valid_id(opts, id_):
    '''
    Returns if the passed id is valid: the id must be clean and must resolve
    to a path inside opts['pki_dir'].
    '''
    try:
        return bool(clean_path(opts['pki_dir'], id_)) and clean_id(id_)
    # AttributeError/TypeError: opts or id_ of the wrong type;
    # KeyError: 'pki_dir' missing from opts.  All mean "not valid".
    except (AttributeError, KeyError, TypeError):
        return False
def safe_py_code(code):
    '''
    Check a string to see if it has any potentially unsafe routines which
    could be executed via python, this routine is used to improve the
    safety of modules such as virtualenv.

    Returns True when none of the blacklisted tokens appear in ``code``.

    NOTE(review): this is a blacklist, not a sandbox — it blocks the obvious
    vectors only and should not be relied on for untrusted input.
    '''
    bads = (
        'import',
        ';',
        'subprocess',
        'eval',
        'open',
        'file',
        'exec',
        'input')
    # 'in' is the idiomatic (and cheaper) membership test; str.count was
    # doing a full scan just to compare against zero.
    return not any(bad in code for bad in bads)
def verify_log(opts):
    '''
    If an insecure logging configuration is found, show a warning
    '''
    configured = LOG_LEVELS.get(str(opts.get('log_level')).lower(),
                                logging.NOTSET)
    if configured < logging.INFO:
        log.warning('Insecure logging configuration detected! Sensitive data may be logged.')
def win_verify_env(dirs, permissive=False, pki_dir='', skip_extra=False):
    '''
    Verify that the named directories are in place and that the environment
    can shake the salt.

    dirs       -- directories to create and secure; dirs[0] must live under
                  the salt install root
    permissive -- when True, do not tighten the DACL on the install root
    pki_dir    -- directory that receives its own restrictive permissions
    skip_extra -- when True, skip the extra verification checks
    '''
    import salt.utils.win_functions
    import salt.utils.win_dacl

    def _apply_secure_dacl(obj_name):
        # Grant Administrators, SYSTEM and Owner full control using
        # locale-agnostic SIDs, then persist the protected DACL.  Shared by
        # the install-root and PKI-dir branches below.
        # Get a clean dacl by not passing an obj_name
        dacl = salt.utils.win_dacl.dacl()
        # Administrators Group
        dacl.add_ace('S-1-5-32-544', 'grant', 'full_control',
                     'this_folder_subfolders_files')
        # System
        dacl.add_ace('S-1-5-18', 'grant', 'full_control',
                     'this_folder_subfolders_files')
        # Owner
        dacl.add_ace('S-1-3-4', 'grant', 'full_control',
                     'this_folder_subfolders_files')
        # Save the dacl to the object
        dacl.save(obj_name, True)

    # Get the root path directory where salt is installed
    path = dirs[0]
    while os.path.basename(path) not in ['salt', 'salt-tests-tmpdir']:
        path, base = os.path.split(path)

    # Create the root path directory if missing
    if not os.path.isdir(path):
        os.makedirs(path)

    # Set permissions to the root path directory
    current_user = salt.utils.win_functions.get_current_user()
    if salt.utils.win_functions.is_admin(current_user):
        try:
            # Make the Administrators group owner
            # Use the SID to be locale agnostic
            salt.utils.win_dacl.set_owner(path, 'S-1-5-32-544')
        except CommandExecutionError:
            msg = 'Unable to securely set the owner of "{0}".'.format(path)
            if is_console_configured():
                log.critical(msg)
            else:
                sys.stderr.write("CRITICAL: {0}\n".format(msg))

        if not permissive:
            try:
                _apply_secure_dacl(path)
            except CommandExecutionError:
                msg = 'Unable to securely set the permissions of ' \
                      '"{0}".'.format(path)
                if is_console_configured():
                    log.critical(msg)
                else:
                    sys.stderr.write("CRITICAL: {0}\n".format(msg))

    # Create the directories
    for dir_ in dirs:
        if not dir_:
            continue
        if not os.path.isdir(dir_):
            try:
                os.makedirs(dir_)
            except OSError as err:
                msg = 'Failed to create directory path "{0}" - {1}\n'
                sys.stderr.write(msg.format(dir_, err))
                sys.exit(err.errno)

        # The PKI dir gets its own permissions
        if dir_ == pki_dir:
            try:
                # Make Administrators group the owner.
                # BUG FIX: this previously targeted 'path' (the install
                # root), leaving the PKI dir itself with its inherited
                # owner; the PKI dir is the object being secured here.
                salt.utils.win_dacl.set_owner(dir_, 'S-1-5-32-544')
                # Give Admins, System and Owner permissions
                _apply_secure_dacl(dir_)
            except CommandExecutionError:
                msg = 'Unable to securely set the permissions of "{0}".'
                msg = msg.format(dir_)
                if is_console_configured():
                    log.critical(msg)
                else:
                    sys.stderr.write("CRITICAL: {0}\n".format(msg))

    if skip_extra is False:
        # Run the extra verification checks
        zmq_version()
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_2815_0 |
crossvul-python_data_good_3680_1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Copyright 2011, Piston Cloud Computing, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
Includes injection of SSH PGP keys into authorized_keys file.
"""
import crypt
import json
import os
import random
import re
import tempfile
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.virt.disk import guestfs
from nova.virt.disk import loop
from nova.virt.disk import nbd
LOG = logging.getLogger(__name__)
# Configuration options for disk image handling; registered on FLAGS below.
disk_opts = [
    cfg.StrOpt('injected_network_template',
               default='$pybasedir/nova/virt/interfaces.template',
               help='Template file for injected network'),
    cfg.ListOpt('img_handlers',
                default=['loop', 'nbd', 'guestfs'],
                help='Order of methods used to mount disk images'),

    # NOTE(yamahata): ListOpt won't work because the command may include a
    # comma. For example:
    #
    # mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16
    # --label %(fs_label)s %(target)s
    #
    # list arguments are comma separated and there is no way to
    # escape such commas.
    #
    cfg.MultiStrOpt('virt_mkfs',
                    default=[
                        'default=mkfs.ext3 -L %(fs_label)s -F %(target)s',
                        'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s',
                        'windows=mkfs.ntfs'
                        ' --force --fast --label %(fs_label)s %(target)s',
                        # NOTE(yamahata): vfat case
                        #'windows=mkfs.vfat -n %(fs_label)s %(target)s',
                    ],
                    help='mkfs commands for ephemeral device. '
                    'The format is <os_type>=<mkfs command>'),
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(disk_opts)

# Parsed view of FLAGS.virt_mkfs: os_type -> mkfs command template,
# consumed by mkfs() below.
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None

for s in FLAGS.virt_mkfs:
    # NOTE(yamahata): mkfs command may includes '=' for its options.
    # So item.partition('=') doesn't work here
    os_type, mkfs_command = s.split('=', 1)
    if os_type:
        _MKFS_COMMAND[os_type] = mkfs_command
    if os_type == 'default':
        _DEFAULT_MKFS_COMMAND = mkfs_command

# Matches 'qemu-img info' output, e.g. "virtual size: 1.0G (1073741824 bytes)";
# group 2 captures the byte count.
# NOTE(review): non-raw string — '\(' happens to work but r'' is preferred.
_QEMU_VIRT_SIZE_REGEX = re.compile('^virtual size: (.*) \(([0-9]+) bytes\)',
                                   re.MULTILINE)
def mkfs(os_type, fs_label, target):
    """Format ``target`` using the mkfs command configured for ``os_type``.

    Falls back to the 'default' command template; silently does nothing when
    no template is configured at all.
    """
    template = _MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or ''
    # Interpolate explicitly instead of '% locals()', which exposed every
    # local name to the format string and breaks under refactoring.
    mkfs_command = template % {'fs_label': fs_label, 'target': target}
    if mkfs_command:
        utils.execute(*mkfs_command.split())
def get_image_virtual_size(image):
    """Return the virtual size in bytes reported by 'qemu-img info'."""
    stdout, _ = utils.execute('qemu-img', 'info', image)
    match = _QEMU_VIRT_SIZE_REGEX.search(stdout)
    return int(match.group(2))
def extend(image, size):
    """Increase image to size"""
    # NOTE(MotoKen): check image virtual size before resize; never shrink.
    if get_image_virtual_size(image) >= size:
        return
    utils.execute('qemu-img', 'resize', image, size)
    # NOTE(vish): attempts to resize filesystem; exit codes are tolerated
    # because the image may not contain an ext filesystem at all.
    utils.execute('e2fsck', '-fp', image, check_exit_code=False)
    utils.execute('resize2fs', image, check_exit_code=False)
def bind(src, target, instance_name):
    """Bind device to a filesystem"""
    if not src:
        return
    utils.execute('touch', target, run_as_root=True)
    utils.execute('mount', '-o', 'bind', src, target,
                  run_as_root=True)
    # Allow the LXC guest to use the block device: whitelist its
    # major:minor numbers in the libvirt cgroup device controller.
    device_stat = os.stat(src)
    cgroup_info = "b %s:%s rwm\n" % (os.major(device_stat.st_rdev),
                                     os.minor(device_stat.st_rdev))
    cgroups_path = ("/sys/fs/cgroup/devices/libvirt/lxc/%s/devices.allow"
                    % instance_name)
    utils.execute('tee', cgroups_path,
                  process_input=cgroup_info, run_as_root=True)
def unbind(target):
    """Unmount a previously bind-mounted target, if any."""
    if not target:
        return
    utils.execute('umount', target, run_as_root=True)
class _DiskImage(object):
    """Provide operations on a disk image file.

    Tries each configured mount handler (loop, nbd, guestfs) in order until
    one succeeds; diagnostics from failed attempts accumulate in 'errors'.
    """

    def __init__(self, image, partition=None, use_cow=False, mount_dir=None):
        # These passed to each mounter
        self.image = image              # path to the disk image file
        self.partition = partition      # partition number, or None for whole image
        self.mount_dir = mount_dir      # target mount point; created lazily if None

        # Internal
        self._mkdir = False             # True when we created mount_dir ourselves
        self._mounter = None            # the handler that successfully mounted
        self._errors = []               # error strings from failed handlers

        # As a performance tweak, don't bother trying to
        # directly loopback mount a cow image.
        self.handlers = FLAGS.img_handlers[:]
        if use_cow and 'loop' in self.handlers:
            self.handlers.remove('loop')

        if not self.handlers:
            raise exception.Error(_('no capable image handler configured'))

    @property
    def errors(self):
        """Return the collated errors from all operations."""
        return '\n--\n'.join([''] + self._errors)

    @staticmethod
    def _handler_class(mode):
        """Look up the appropriate class to use based on MODE."""
        for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
            if cls.mode == mode:
                return cls
        raise exception.Error(_("unknown disk image handler: %s") % mode)

    def mount(self):
        """Mount a disk image, using the object attributes.

        The first supported means provided by the mount classes is used.

        True, or False is returned and the 'errors' attribute
        contains any diagnostics.
        """
        if self._mounter:
            raise exception.Error(_('image already mounted'))

        if not self.mount_dir:
            self.mount_dir = tempfile.mkdtemp()
            self._mkdir = True

        try:
            # First handler that mounts wins; the others' errors are kept
            # for diagnostics.
            for h in self.handlers:
                mounter_cls = self._handler_class(h)
                mounter = mounter_cls(image=self.image,
                                      partition=self.partition,
                                      mount_dir=self.mount_dir)
                if mounter.do_mount():
                    self._mounter = mounter
                    break
                else:
                    LOG.debug(mounter.error)
                    self._errors.append(mounter.error)
        finally:
            # If nothing mounted, clean up the temp dir we may have created.
            if not self._mounter:
                self.umount()  # rmdir

        return bool(self._mounter)

    def umount(self):
        """Unmount a disk image from the file system."""
        try:
            if self._mounter:
                self._mounter.do_umount()
        finally:
            # Only remove the mount dir when we created it in mount().
            if self._mkdir:
                os.rmdir(self.mount_dir)
# Public module functions
def inject_data(image,
                key=None, net=None, metadata=None, admin_password=None,
                partition=None, use_cow=False):
    """Injects a ssh key and optionally net data into a disk image.

    When 'partition' is given, the image is mounted as a fully partitioned
    disk and injection targets that partition number; otherwise the image
    is mounted as a single partition.
    """
    img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if not img.mount():
        raise exception.Error(img.errors)
    try:
        inject_data_into_fs(img.mount_dir,
                            key, net, metadata, admin_password,
                            utils.execute)
    finally:
        img.umount()
def inject_files(image, files, partition=None, use_cow=False):
    """Injects arbitrary (path, contents) pairs into a disk image."""
    img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if not img.mount():
        raise exception.Error(img.errors)
    try:
        for path, contents in files:
            _inject_file_into_fs(img.mount_dir, path, contents)
    finally:
        img.umount()
def setup_container(image, container_dir=None, use_cow=False):
    """Setup the LXC container.

    It will mount the loopback image to the container directory in order
    to create the root filesystem for the container.

    Returns the mounted _DiskImage on success; logs and returns None on
    failure (callers must handle None).

    LXC does not support qcow2 images yet.
    """
    try:
        img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
        if img.mount():
            return img
        else:
            raise exception.Error(img.errors)
    # 'except Exception as exn' replaces the Python-2-only
    # 'except Exception, exn' form (a SyntaxError under Python 3).
    except Exception as exn:
        LOG.exception(_('Failed to mount filesystem: %s'), exn)
def destroy_container(img):
    """Destroy the container once it terminates.

    It will umount the container that is mounted, and delete any linked
    devices.  Failures are logged, not raised (best-effort teardown).

    LXC does not support qcow2 images yet.
    """
    try:
        if img:
            img.umount()
    # 'except Exception as exn' replaces the Python-2-only
    # 'except Exception, exn' form (a SyntaxError under Python 3).
    except Exception as exn:
        LOG.exception(_('Failed to remove container: %s'), exn)
def inject_data_into_fs(fs, key, net, metadata, admin_password, execute):
    """Injects data into a filesystem already mounted by the caller.

    Virt connections can call this directly if they mount their fs
    in a different way to inject_data.
    """
    # Each payload is only injected when truthy; order matters and is
    # preserved: key, net, metadata, admin password.
    injectors = (
        (key, _inject_key_into_fs),
        (net, _inject_net_into_fs),
        (metadata, _inject_metadata_into_fs),
        (admin_password, _inject_admin_password_into_fs),
    )
    for payload, injector in injectors:
        if payload:
            injector(payload, fs, execute=execute)
def _join_and_check_path_within_fs(fs, *args):
'''os.path.join() with safety check for injected file paths.
Join the supplied path components and make sure that the
resulting path we are injecting into is within the
mounted guest fs. Trying to be clever and specifying a
path with '..' in it will hit this safeguard.
'''
absolute_path = os.path.realpath(os.path.join(fs, *args))
if not absolute_path.startswith(os.path.realpath(fs) + '/'):
raise exception.Invalid(_('injected file path not valid'))
return absolute_path
def _inject_file_into_fs(fs, path, contents, append=False):
    """Write (or append) 'contents' to 'path' inside the mounted guest fs.

    The target path is validated so it cannot escape the guest filesystem.
    """
    absolute_path = _join_and_check_path_within_fs(fs, path.lstrip('/'))
    utils.execute('mkdir', '-p', os.path.dirname(absolute_path),
                  run_as_root=True)
    # 'tee -a' appends, plain 'tee' truncates.
    tee_args = ['-a', absolute_path] if append else [absolute_path]
    utils.execute('tee', *tee_args,
                  process_input=contents, run_as_root=True)
def _inject_metadata_into_fs(metadata, fs, execute=None):
    """Serialize metadata items to JSON and write them to /meta.js."""
    flattened = dict((item.key, item.value) for item in metadata)
    _inject_file_into_fs(fs, 'meta.js', json.dumps(flattened))
def _inject_key_into_fs(key, fs, execute=None):
    """Add the given public ssh key to root's authorized_keys.

    key is an ssh key string.
    fs is the path to the base of the filesystem into which to inject
    the key.
    """
    sshdir = _join_and_check_path_within_fs(fs, 'root', '.ssh')
    # Create root's ~/.ssh with the conventional restrictive bits.
    for command in (('mkdir', '-p', sshdir),
                    ('chown', 'root', sshdir),
                    ('chmod', '700', sshdir)):
        utils.execute(*command, run_as_root=True)

    keyfile = os.path.join('root', '.ssh', 'authorized_keys')
    key_data = ('\n# The following ssh key was injected by Nova\n'
                '%s\n' % key.strip())
    _inject_file_into_fs(fs, keyfile, key_data, append=True)
def _inject_net_into_fs(net, fs, execute=None):
    """Inject /etc/network/interfaces into the filesystem rooted at fs.

    net is the contents of /etc/network/interfaces.
    """
    netdir = _join_and_check_path_within_fs(fs, 'etc', 'network')
    utils.execute('mkdir', '-p', netdir, run_as_root=True)
    utils.execute('chown', 'root:root', netdir, run_as_root=True)
    # NOTE(review): 755 is passed as an int here (as in the original);
    # utils.execute presumably stringifies args — confirm before changing.
    utils.execute('chmod', 755, netdir, run_as_root=True)
    netfile = os.path.join('etc', 'network', 'interfaces')
    _inject_file_into_fs(fs, netfile, net)
def _inject_admin_password_into_fs(admin_passwd, fs, execute=None):
    """Set the root password to admin_passwd

    admin_password is a root password
    fs is the path to the base of the filesystem into which to inject
    the key.

    This method modifies the instance filesystem directly,
    and does not require a guest agent running in the instance.
    """
    # The approach used here is to copy the password and shadow
    # files from the instance filesystem to local files, make any
    # necessary changes, and then copy them back.
    admin_user = 'root'

    fd, tmp_passwd = tempfile.mkstemp()
    os.close(fd)
    fd, tmp_shadow = tempfile.mkstemp()
    os.close(fd)

    # try/finally so the temp copies are removed even when a copy,
    # validation, or _set_passwd step raises (they leaked before).
    try:
        passwd_path = _join_and_check_path_within_fs(fs, 'etc', 'passwd')
        shadow_path = _join_and_check_path_within_fs(fs, 'etc', 'shadow')

        utils.execute('cp', passwd_path, tmp_passwd, run_as_root=True)
        utils.execute('cp', shadow_path, tmp_shadow, run_as_root=True)
        _set_passwd(admin_user, admin_passwd, tmp_passwd, tmp_shadow)
        utils.execute('cp', tmp_passwd, passwd_path, run_as_root=True)
        utils.execute('cp', tmp_shadow, shadow_path, run_as_root=True)
    finally:
        os.unlink(tmp_passwd)
        os.unlink(tmp_shadow)
def _set_passwd(username, admin_passwd, passwd_file, shadow_file):
    """set the password for username to admin_passwd

    The passwd_file is not modified.  The shadow_file is updated.
    if the username is not found in both files, an exception is raised.

    :param username: the username
    :param admin_passwd: the plaintext password to set
    :param passwd_file: path to the passwd file
    :param shadow_file: path to the shadow password file
    :returns: nothing
    :raises: exception.Error(), IOError()
    """
    salt_set = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789./')
    # encryption algo - id pairs for crypt()
    algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''}

    salt = ''.join([random.choice(salt_set) for _ in range(16)])

    # crypt() depends on the underlying libc, and may not support all
    # forms of hash. We try md5 first. If we get only 13 characters back,
    # then the underlying crypt() didn't understand the '$n$salt' magic,
    # so we fall back to DES.
    # md5 is the default because it's widely supported. Although the
    # local crypt() might support stronger SHA, the target instance
    # might not.
    encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt)
    if len(encrypted_passwd) == 13:
        encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt)

    # NOTE(review): files are read in 'rb' mode and entries compared to the
    # str username — assumes Python 2 byte-string semantics; confirm before
    # porting to Python 3.

    # username MUST exist in passwd file or it's an error.
    # Per-handle try/finally: the previous single finally double-closed
    # s_file and raised NameError if the second open() failed.
    p_file = open(passwd_file, 'rb')
    try:
        found = any(entry.split(':')[0] == username for entry in p_file)
    finally:
        p_file.close()
    if not found:
        msg = _('User %(username)s not found in password file.')
        # BUG FIX: '%' with a %(name)s template needs a mapping; passing the
        # bare string raised TypeError instead of the intended error.
        raise exception.Error(msg % {'username': username})

    # update password in the shadow file. It's an error if the
    # user doesn't exist.
    new_shadow = []
    found = False
    s_file = open(shadow_file, 'rb')
    try:
        for entry in s_file:
            split_entry = entry.split(':')
            if split_entry[0] == username:
                split_entry[1] = encrypted_passwd
                found = True
            new_shadow.append(':'.join(split_entry))
    finally:
        s_file.close()
    if not found:
        msg = _('User %(username)s not found in shadow file.')
        raise exception.Error(msg % {'username': username})

    s_file = open(shadow_file, 'wb')
    try:
        s_file.writelines(new_shadow)
    finally:
        s_file.close()
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_3680_1 |
crossvul-python_data_bad_495_2 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-22/py/bad_495_2 |
crossvul-python_data_bad_3538_1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Proxy AMI-related calls from cloud controller to objectstore service."""
import binascii
import os
import shutil
import tarfile
import tempfile
from xml.etree import ElementTree
import boto.s3.connection
import eventlet
from nova import crypto
from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova import utils
from nova.image import service
from nova.api.ec2 import ec2utils
# Module logger and configuration flags for the S3-backed image service.
LOG = logging.getLogger("nova.image.s3")

FLAGS = flags.FLAGS
flags.DEFINE_string('image_decryption_dir', '/tmp',
                    'parent dir for tempdir used for image decryption')
# NOTE(review): the default key values are placeholders — nova-objectstore
# does not validate S3 credentials (see S3ImageService._conn).
flags.DEFINE_string('s3_access_key', 'notchecked',
                    'access key to use for s3 server for images')
flags.DEFINE_string('s3_secret_key', 'notchecked',
                    'secret key to use for s3 server for images')
class S3ImageService(service.BaseImageService):
    """Wraps an existing image service to support s3 based register."""

    def __init__(self, service=None, *args, **kwargs):
        self.service = service or image.get_default_image_service()
        self.service.__init__(*args, **kwargs)

    def create(self, context, metadata, data=None):
        """Create an image.

        metadata['properties'] should contain image_location.

        """
        image = self._s3_create(context, metadata)
        return image

    def delete(self, context, image_id):
        self.service.delete(context, image_id)

    def update(self, context, image_id, metadata, data=None):
        image = self.service.update(context, image_id, metadata, data)
        return image

    def index(self, context):
        return self.service.index(context)

    def detail(self, context):
        return self.service.detail(context)

    def show(self, context, image_id):
        return self.service.show(context, image_id)

    def show_by_name(self, context, name):
        return self.service.show_by_name(context, name)

    @staticmethod
    def _conn(context):
        """Return a boto S3 connection to the objectstore service."""
        # NOTE(vish): access and secret keys for s3 server are not
        # checked in nova-objectstore
        access = FLAGS.s3_access_key
        secret = FLAGS.s3_secret_key
        calling = boto.s3.connection.OrdinaryCallingFormat()
        return boto.s3.connection.S3Connection(aws_access_key_id=access,
                                               aws_secret_access_key=secret,
                                               is_secure=False,
                                               calling_format=calling,
                                               port=FLAGS.s3_port,
                                               host=FLAGS.s3_host)

    @staticmethod
    def _download_file(bucket, filename, local_dir):
        """Fetch one bucket object into local_dir; return the local path."""
        key = bucket.get_key(filename)
        # SECURITY: use only the base name — manifest-supplied names must
        # not be able to write outside local_dir via '../' components.
        local_filename = os.path.join(local_dir, os.path.basename(filename))
        key.get_contents_to_filename(local_filename)
        return local_filename

    def _s3_parse_manifest(self, context, metadata, manifest):
        """Parse an EC2 bundle manifest and register a queued image.

        Returns the parsed manifest ElementTree and the created image dict.
        """
        manifest = ElementTree.fromstring(manifest)
        image_format = 'ami'
        image_type = 'machine'

        try:
            kernel_id = manifest.find('machine_configuration/kernel_id').text
            if kernel_id == 'true':
                image_format = 'aki'
                image_type = 'kernel'
                kernel_id = None
        except Exception:
            kernel_id = None

        try:
            ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
            if ramdisk_id == 'true':
                image_format = 'ari'
                image_type = 'ramdisk'
                ramdisk_id = None
        except Exception:
            ramdisk_id = None

        try:
            arch = manifest.find('machine_configuration/architecture').text
        except Exception:
            arch = 'x86_64'

        # NOTE(yamahata):
        # EC2 ec2-budlne-image --block-device-mapping accepts
        # <virtual name>=<device name> where
        # virtual name = {ami, root, swap, ephemeral<N>}
        #                where N is no negative integer
        # device name = the device name seen by guest kernel.
        # They are converted into
        # block_device_mapping/mapping/{virtual, device}
        #
        # Do NOT confuse this with ec2-register's block device mapping
        # argument.
        mappings = []
        try:
            block_device_mapping = manifest.findall('machine_configuration/'
                                                    'block_device_mapping/'
                                                    'mapping')
            for bdm in block_device_mapping:
                mappings.append({'virtual': bdm.find('virtual').text,
                                 'device': bdm.find('device').text})
        except Exception:
            mappings = []

        properties = metadata['properties']
        properties['project_id'] = context.project_id
        properties['architecture'] = arch

        if kernel_id:
            properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)

        if ramdisk_id:
            properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)

        if mappings:
            properties['mappings'] = mappings

        metadata.update({'disk_format': image_format,
                         'container_format': image_format,
                         'status': 'queued',
                         'is_public': False,
                         'properties': properties})
        metadata['properties']['image_state'] = 'pending'
        image = self.service.create(context, metadata)
        return manifest, image

    def _s3_create(self, context, metadata):
        """Gets a manifest from s3 and makes an image."""
        image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)

        image_location = metadata['properties']['image_location']
        bucket_name = image_location.split('/')[0]
        manifest_path = image_location[len(bucket_name) + 1:]
        bucket = self._conn(context).get_bucket(bucket_name)
        key = bucket.get_key(manifest_path)
        manifest = key.get_contents_as_string()

        manifest, image = self._s3_parse_manifest(context, metadata, manifest)
        image_id = image['id']

        def delayed_create():
            """This handles the fetching and decrypting of the part files."""
            log_vars = {'image_location': image_location,
                        'image_path': image_path}
            metadata['properties']['image_state'] = 'downloading'
            self.service.update(context, image_id, metadata)

            try:
                parts = []
                elements = manifest.find('image').getiterator('filename')
                for fn_element in elements:
                    part = self._download_file(bucket,
                                               fn_element.text,
                                               image_path)
                    parts.append(part)

                # NOTE(vish): this may be suboptimal, should we use cat?
                enc_filename = os.path.join(image_path, 'image.encrypted')
                with open(enc_filename, 'w') as combined:
                    for filename in parts:
                        with open(filename) as part:
                            shutil.copyfileobj(part, combined)

            except Exception:
                LOG.exception(_("Failed to download %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_download'
                self.service.update(context, image_id, metadata)
                return

            metadata['properties']['image_state'] = 'decrypting'
            self.service.update(context, image_id, metadata)

            try:
                hex_key = manifest.find('image/ec2_encrypted_key').text
                encrypted_key = binascii.a2b_hex(hex_key)
                hex_iv = manifest.find('image/ec2_encrypted_iv').text
                encrypted_iv = binascii.a2b_hex(hex_iv)

                # FIXME(vish): grab key from common service so this can run on
                #              any host.
                cloud_pk = crypto.key_path(context.project_id)

                dec_filename = os.path.join(image_path, 'image.tar.gz')
                self._decrypt_image(enc_filename, encrypted_key,
                                    encrypted_iv, cloud_pk,
                                    dec_filename)
            except Exception:
                LOG.exception(_("Failed to decrypt %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_decrypt'
                self.service.update(context, image_id, metadata)
                return

            metadata['properties']['image_state'] = 'untarring'
            self.service.update(context, image_id, metadata)

            try:
                unz_filename = self._untarzip_image(image_path, dec_filename)
            except Exception:
                LOG.exception(_("Failed to untar %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_untar'
                self.service.update(context, image_id, metadata)
                return

            metadata['properties']['image_state'] = 'uploading'
            self.service.update(context, image_id, metadata)
            try:
                with open(unz_filename) as image_file:
                    self.service.update(context, image_id,
                                        metadata, image_file)
            except Exception:
                LOG.exception(_("Failed to upload %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_upload'
                self.service.update(context, image_id, metadata)
                return

            metadata['properties']['image_state'] = 'available'
            metadata['status'] = 'active'
            self.service.update(context, image_id, metadata)

            shutil.rmtree(image_path)

        eventlet.spawn_n(delayed_create)

        return image

    @staticmethod
    def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
                       cloud_private_key, decrypted_filename):
        """Decrypt the bundled image via openssl using the cloud key."""
        key, err = utils.execute('openssl',
                                 'rsautl',
                                 '-decrypt',
                                 '-inkey', '%s' % cloud_private_key,
                                 process_input=encrypted_key,
                                 check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt private key: %s')
                                  % err)
        iv, err = utils.execute('openssl',
                                'rsautl',
                                '-decrypt',
                                '-inkey', '%s' % cloud_private_key,
                                process_input=encrypted_iv,
                                check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt initialization '
                                    'vector: %s') % err)

        _out, err = utils.execute('openssl', 'enc',
                                  '-d', '-aes-128-cbc',
                                  '-in', '%s' % (encrypted_filename,),
                                  '-K', '%s' % (key,),
                                  '-iv', '%s' % (iv,),
                                  '-out', '%s' % (decrypted_filename,),
                                  check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt image file '
                                    '%(image_file)s: %(err)s') %
                                  {'image_file': encrypted_filename,
                                   'err': err})

    @staticmethod
    def _test_for_malicious_tarball(path, filename):
        """Raise if extracting the tarball would write outside of 'path'."""
        safe_root = os.path.abspath(path)
        tar_file = tarfile.open(filename, 'r|gz')
        try:
            for member_name in tar_file.getnames():
                dest = os.path.abspath(os.path.join(safe_root, member_name))
                if dest != safe_root and \
                        not dest.startswith(safe_root + os.sep):
                    raise exception.Error(_('Unsafe filenames in image'))
        finally:
            tar_file.close()

    @staticmethod
    def _untarzip_image(path, filename):
        """Extract the image tarball and return the first member's path."""
        # SECURITY (CWE-22): the tarball comes from user-supplied image
        # data; reject member names that would escape the extraction dir
        # before calling extractall. 'r|gz' is stream mode, so the archive
        # is opened twice (once to validate, once to extract).
        S3ImageService._test_for_malicious_tarball(path, filename)
        tar_file = tarfile.open(filename, 'r|gz')
        tar_file.extractall(path)
        image_file = tar_file.getnames()[0]
        tar_file.close()
        return os.path.join(path, image_file)
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_3538_1 |
crossvul-python_data_good_994_0 | # Natural Language Toolkit: Corpus & Model Downloader
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
The NLTK corpus and module downloader. This module defines several
interfaces which can be used to download corpora, models, and other
data packages that can be used with NLTK.
Downloading Packages
====================
If called with no arguments, ``download()`` will display an interactive
interface which can be used to download and install new packages.
If Tkinter is available, then a graphical interface will be shown,
otherwise a simple text interface will be provided.
Individual packages can be downloaded by calling the ``download()``
function with a single argument, giving the package identifier for the
package that should be downloaded:
>>> download('treebank') # doctest: +SKIP
[nltk_data] Downloading package 'treebank'...
[nltk_data] Unzipping corpora/treebank.zip.
NLTK also provides a number of \"package collections\", consisting of
a group of related packages. To download all packages in a
collection, simply call ``download()`` with the collection's
identifier:
>>> download('all-corpora') # doctest: +SKIP
[nltk_data] Downloading package 'abc'...
[nltk_data] Unzipping corpora/abc.zip.
[nltk_data] Downloading package 'alpino'...
[nltk_data] Unzipping corpora/alpino.zip.
...
[nltk_data] Downloading package 'words'...
[nltk_data] Unzipping corpora/words.zip.
Download Directory
==================
By default, packages are installed in either a system-wide directory
(if Python has sufficient access to write to it); or in the current
user's home directory. However, the ``download_dir`` argument may be
used to specify a different installation target, if desired.
See ``Downloader.default_download_dir()`` for a more detailed
description of how the default download directory is chosen.
NLTK Download Server
====================
Before downloading any packages, the corpus and module downloader
contacts the NLTK download server, to retrieve an index file
describing the available packages. By default, this index file is
loaded from ``https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml``.
If necessary, it is possible to create a new ``Downloader`` object,
specifying a different URL for the package index file.
Usage::
python nltk/downloader.py [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
or::
python -m nltk.downloader [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
"""
# ----------------------------------------------------------------------
from __future__ import print_function, division, unicode_literals
"""
0 1 2 3
[label][----][label][----]
[column ][column ]
Notes
=====
Handling data files.. Some questions:
* Should the data files be kept zipped or unzipped? I say zipped.
* Should the data files be kept in svn at all? Advantages: history;
automatic version numbers; 'svn up' could be used rather than the
downloader to update the corpora. Disadvantages: they're big,
which makes working from svn a bit of a pain. And we're planning
to potentially make them much bigger. I don't think we want
people to have to download 400MB corpora just to use nltk from svn.
* Compromise: keep the data files in trunk/data rather than in
trunk/nltk. That way you can check them out in svn if you want
to; but you don't need to, and you can use the downloader instead.
* Also: keep models in mind. When we change the code, we'd
potentially like the models to get updated. This could require a
little thought.
* So.. let's assume we have a trunk/data directory, containing a bunch
of packages. The packages should be kept as zip files, because we
really shouldn't be editing them much (well -- we may edit models
more, but they tend to be binary-ish files anyway, where diffs
aren't that helpful). So we'll have trunk/data, with a bunch of
files like abc.zip and treebank.zip and propbank.zip. For each
package we could also have eg treebank.xml and propbank.xml,
describing the contents of the package (name, copyright, license,
etc). Collections would also have .xml files. Finally, we would
pull all these together to form a single index.xml file. Some
directory structure wouldn't hurt. So how about::
/trunk/data/ ....................... root of data svn
index.xml ........................ main index file
src/ ............................. python scripts
packages/ ........................ dir for packages
corpora/ ....................... zip & xml files for corpora
grammars/ ...................... zip & xml files for grammars
taggers/ ....................... zip & xml files for taggers
tokenizers/ .................... zip & xml files for tokenizers
etc.
collections/ ..................... xml files for collections
Where the root (/trunk/data) would contain a makefile; and src/
would contain a script to update the info.xml file. It could also
contain scripts to rebuild some of the various model files. The
script that builds index.xml should probably check that each zip
file expands entirely into a single subdir, whose name matches the
package's uid.
Changes I need to make:
- in index: change "size" to "filesize" or "compressed-size"
- in index: add "unzipped-size"
- when checking status: check both compressed & uncompressed size.
uncompressed size is important to make sure we detect a problem
if something got partially unzipped. define new status values
to differentiate stale vs corrupt vs corruptly-uncompressed??
(we shouldn't need to re-download the file if the zip file is ok
but it didn't get uncompressed fully.)
- add other fields to the index: author, license, copyright, contact,
etc.
the current grammars/ package would become a single new package (eg
toy-grammars or book-grammars).
xml file should have:
- authorship info
- license info
- copyright info
- contact info
- info about what type of data/annotation it contains?
- recommended corpus reader?
collections can contain other collections. they can also contain
multiple package types (corpora & models). Have a single 'basics'
package that includes everything we talk about in the book?
n.b.: there will have to be a fallback to the punkt tokenizer, in case
they didn't download that model.
default: unzip or not?
"""
import time, os, zipfile, sys, textwrap, threading, itertools, shutil, functools
import subprocess
from hashlib import md5
from xml.etree import ElementTree
try:
TKINTER = True
from six.moves.tkinter import (
Tk,
Frame,
Label,
Entry,
Button,
Canvas,
Menu,
IntVar,
TclError,
)
from six.moves.tkinter_messagebox import showerror
from nltk.draw.table import Table
from nltk.draw.util import ShowText
except ImportError:
TKINTER = False
TclError = ValueError
from six import string_types, text_type
from six.moves import input
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError, URLError
import nltk
from nltk.compat import python_2_unicode_compatible
# urllib2 = nltk.internals.import_from_stdlib('urllib2')
######################################################################
# Directory entry objects (from the data server's index file)
######################################################################
@python_2_unicode_compatible
class Package(object):
    """
    A directory entry for a downloadable package.  These entries are
    extracted from the XML index file that is downloaded by
    ``Downloader``.  Each package consists of a single file; but if
    that file is a zip file, then it can be automatically decompressed
    when the package is installed.
    """

    def __init__(
        self,
        id,
        url,
        name=None,
        subdir='',
        size=None,
        unzipped_size=None,
        checksum=None,
        svn_revision=None,
        copyright='Unknown',
        contact='Unknown',
        license='Unknown',
        author='Unknown',
        unzip=True,
        **kw
    ):
        self.id = id
        """A unique identifier for this package."""

        self.name = name or id
        """A string name for this package."""

        self.subdir = subdir
        """The subdirectory where this package should be installed.
        E.g., ``'corpora'`` or ``'taggers'``."""

        self.url = url
        """A URL that can be used to download this package's file."""

        # BUGFIX: the original unconditionally called int(size) /
        # int(unzipped_size), which raises TypeError when the index omits
        # these attributes and the None defaults are used.  Leave the
        # attribute as None in that case; indexes that do provide sizes
        # behave exactly as before.
        self.size = int(size) if size is not None else None
        """The filesize (in bytes) of the package file."""

        self.unzipped_size = int(unzipped_size) if unzipped_size is not None else None
        """The total filesize of the files contained in the package's
        zipfile."""

        self.checksum = checksum
        """The MD-5 checksum of the package file."""

        self.svn_revision = svn_revision
        """A subversion revision number for this package."""

        self.copyright = copyright
        """Copyright holder for this package."""

        self.contact = contact
        """Name & email of the person who should be contacted with
        questions about this package."""

        self.license = license
        """License information for this package."""

        self.author = author
        """Author of this package."""

        # Install filename: subdir/id plus the URL's file extension.
        ext = os.path.splitext(url.split('/')[-1])[1]
        self.filename = os.path.join(subdir, id + ext)
        """The filename that should be used for this package's file.  It
        is formed by joining ``self.subdir`` with ``self.id``, and
        using the same extension as ``url``."""

        self.unzip = bool(int(unzip))  # '0' or '1'
        """A flag indicating whether this corpus should be unzipped by
        default."""

        # Include any other attributes provided by the XML file.
        self.__dict__.update(kw)

    @staticmethod
    def fromxml(xml):
        """Build a ``Package`` from an XML element (or a path/file to parse)."""
        if isinstance(xml, string_types):
            xml = ElementTree.parse(xml)
        # Normalize attribute values to text for py2/py3 compatibility.
        for key in xml.attrib:
            xml.attrib[key] = text_type(xml.attrib[key])
        return Package(**xml.attrib)

    def __lt__(self, other):
        return self.id < other.id

    def __repr__(self):
        return '<Package %s>' % self.id
@python_2_unicode_compatible
class Collection(object):
    """
    A directory entry for a collection of downloadable packages.
    These entries are extracted from the XML index file that is
    downloaded by ``Downloader``.
    """

    def __init__(self, id, children, name=None, **kw):
        # Unique identifier for this collection.
        self.id = id
        # Human-readable name; falls back to the identifier.
        self.name = name or id
        # ``Collections``/``Packages`` directly contained by this collection.
        self.children = children
        # Flattened list of all reachable ``Packages``; populated later
        # (see Downloader._update_index in this module).
        self.packages = None
        # Any extra attributes supplied by the XML index.
        self.__dict__.update(kw)

    @staticmethod
    def fromxml(xml):
        # Accept either a parsed element or a path/file to parse.
        if isinstance(xml, string_types):
            xml = ElementTree.parse(xml)
        # Normalize attribute values to text for py2/py3 compatibility.
        for attr_name in xml.attrib:
            xml.attrib[attr_name] = text_type(xml.attrib[attr_name])
        child_ids = [item.get('ref') for item in xml.findall('item')]
        return Collection(children=child_ids, **xml.attrib)

    def __lt__(self, other):
        return self.id < other.id

    def __repr__(self):
        return '<Collection %s>' % self.id
######################################################################
# Message Passing Objects
######################################################################
class DownloaderMessage(object):
    """A status message object, used by ``incr_download`` to
    communicate its progress."""


class StartCollectionMessage(DownloaderMessage):
    """Data server has started working on a collection of packages."""

    def __init__(self, collection):
        # The Collection record being processed.
        self.collection = collection


class FinishCollectionMessage(DownloaderMessage):
    """Data server has finished working on a collection of packages."""

    def __init__(self, collection):
        # The Collection record that was processed.
        self.collection = collection


class StartPackageMessage(DownloaderMessage):
    """Data server has started working on a package."""

    def __init__(self, package):
        # The Package record being processed.
        self.package = package


class FinishPackageMessage(DownloaderMessage):
    """Data server has finished working on a package."""

    def __init__(self, package):
        self.package = package


class StartDownloadMessage(DownloaderMessage):
    """Data server has started downloading a package."""

    def __init__(self, package):
        self.package = package


class FinishDownloadMessage(DownloaderMessage):
    """Data server has finished downloading a package."""

    def __init__(self, package):
        self.package = package


class StartUnzipMessage(DownloaderMessage):
    """Data server has started unzipping a package."""

    def __init__(self, package):
        self.package = package


class FinishUnzipMessage(DownloaderMessage):
    """Data server has finished unzipping a package."""

    def __init__(self, package):
        self.package = package


class UpToDateMessage(DownloaderMessage):
    """The package download file is already up-to-date"""

    def __init__(self, package):
        self.package = package


class StaleMessage(DownloaderMessage):
    """The package download file is out-of-date or corrupt"""

    def __init__(self, package):
        self.package = package


class ErrorMessage(DownloaderMessage):
    """Data server encountered an error"""

    def __init__(self, package, message):
        self.package = package
        # Accept either an exception or a plain string; store as text.
        if isinstance(message, Exception):
            self.message = str(message)
        else:
            self.message = message


class ProgressMessage(DownloaderMessage):
    """Indicates how much progress the data server has made"""

    def __init__(self, progress):
        # Progress as a percentage (0-100).
        self.progress = progress


class SelectDownloadDirMessage(DownloaderMessage):
    """Indicates what download directory the data server is using"""

    def __init__(self, download_dir):
        self.download_dir = download_dir
######################################################################
# NLTK Data Server
######################################################################
class Downloader(object):
    """
    A class used to access the NLTK data server, which can be used to
    download corpora and other data packages.
    """

    # /////////////////////////////////////////////////////////////////
    # Configuration
    # /////////////////////////////////////////////////////////////////

    INDEX_TIMEOUT = 60 * 60  # 1 hour
    """The amount of time after which the cached copy of the data
    server index will be considered 'stale,' and will be
    re-downloaded."""

    DEFAULT_URL = 'https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml'
    """The default URL for the NLTK data server's index.  An
    alternative URL can be specified when creating a new
    ``Downloader`` object."""

    # /////////////////////////////////////////////////////////////////
    # Status Constants
    # /////////////////////////////////////////////////////////////////

    INSTALLED = 'installed'
    """A status string indicating that a package or collection is
    installed and up-to-date."""

    NOT_INSTALLED = 'not installed'
    """A status string indicating that a package or collection is
    not installed."""

    STALE = 'out of date'
    """A status string indicating that a package or collection is
    corrupt or out-of-date."""

    PARTIAL = 'partial'
    """A status string indicating that a collection is partially
    installed (i.e., only some of its packages are installed.)"""

    # /////////////////////////////////////////////////////////////////
    # Constructor
    # /////////////////////////////////////////////////////////////////
    def __init__(self, server_index_url=None, download_dir=None):
        """
        Create a new data-server interface.

        :param server_index_url: URL of the server's XML index file
            (defaults to ``DEFAULT_URL``).
        :param download_dir: default directory packages are installed
            into (defaults to ``default_download_dir()``).
        """
        self._url = server_index_url or self.DEFAULT_URL
        """The URL for the data server's index file."""
        self._collections = {}
        """Dictionary from collection identifier to ``Collection``"""
        self._packages = {}
        """Dictionary from package identifier to ``Package``"""
        self._download_dir = download_dir
        """The default directory to which packages will be downloaded."""
        self._index = None
        """The XML index file downloaded from the data server"""
        self._index_timestamp = None
        """Time at which ``self._index`` was downloaded.  If it is more
        than ``INDEX_TIMEOUT`` seconds old, it will be re-downloaded."""
        self._status_cache = {}
        """Dictionary from package/collection identifier to status
        string (``INSTALLED``, ``NOT_INSTALLED``, ``STALE``, or
        ``PARTIAL``).  Cache is used for packages only, not
        collections."""
        self._errors = None
        """Flag for telling if all packages got successfully downloaded or not."""

        # decide where we're going to save things to.
        if self._download_dir is None:
            self._download_dir = self.default_download_dir()
# /////////////////////////////////////////////////////////////////
# Information
# /////////////////////////////////////////////////////////////////
    def list(
        self,
        download_dir=None,
        show_packages=True,
        show_collections=True,
        header=True,
        more_prompt=False,
        skip_installed=False,
    ):
        """
        Print an index of available packages/collections and their
        install status (relative to *download_dir*) to stdout.  When
        *more_prompt* is true, pause every ~20 lines and allow the user
        to quit with 'x' or 'q'.
        """
        lines = 0  # for more_prompt
        if download_dir is None:
            download_dir = self._download_dir
            print('Using default data directory (%s)' % download_dir)
        if header:
            print('=' * (26 + len(self._url)))
            print(' Data server index for <%s>' % self._url)
            print('=' * (26 + len(self._url)))
            lines += 3  # for more_prompt
        stale = partial = False

        categories = []
        if show_packages:
            categories.append('packages')
        if show_collections:
            categories.append('collections')
        for category in categories:
            print('%s:' % category.capitalize())
            lines += 1  # for more_prompt
            # ``category`` names an accessor method (packages/collections).
            for info in sorted(getattr(self, category)(), key=str):
                status = self.status(info, download_dir)
                if status == self.INSTALLED and skip_installed:
                    continue
                if status == self.STALE:
                    stale = True
                if status == self.PARTIAL:
                    partial = True
                # One-character status marker shown in the listing.
                prefix = {
                    self.INSTALLED: '*',
                    self.STALE: '-',
                    self.PARTIAL: 'P',
                    self.NOT_INSTALLED: ' ',
                }[status]
                # Wrap the name so continuation lines align under column 27.
                name = textwrap.fill(
                    '-' * 27 + (info.name or info.id), 75, subsequent_indent=27 * ' '
                )[27:]
                print(' [%s] %s %s' % (prefix, info.id.ljust(20, '.'), name))
                lines += len(name.split('\n'))  # for more_prompt
                if more_prompt and lines > 20:
                    user_input = input("Hit Enter to continue: ")
                    if user_input.lower() in ('x', 'q'):
                        return
                    lines = 0
            print()
        msg = '([*] marks installed packages'
        if stale:
            msg += '; [-] marks out-of-date or corrupt packages'
        if partial:
            msg += '; [P] marks partially installed collections'
        print(textwrap.fill(msg + ')', subsequent_indent=' ', width=76))
    def packages(self):
        """Return all ``Package`` records from the (possibly re-fetched) index."""
        self._update_index()
        return self._packages.values()

    def corpora(self):
        """Return the packages whose install subdir is ``'corpora'``."""
        self._update_index()
        return [pkg for (id, pkg) in self._packages.items() if pkg.subdir == 'corpora']

    def models(self):
        """Return the packages whose install subdir is anything but ``'corpora'``."""
        self._update_index()
        return [pkg for (id, pkg) in self._packages.items() if pkg.subdir != 'corpora']

    def collections(self):
        """Return all ``Collection`` records from the (possibly re-fetched) index."""
        self._update_index()
        return self._collections.values()
# /////////////////////////////////////////////////////////////////
# Downloading
# /////////////////////////////////////////////////////////////////
def _info_or_id(self, info_or_id):
if isinstance(info_or_id, string_types):
return self.info(info_or_id)
else:
return info_or_id
# [xx] When during downloading is it 'safe' to abort? Only unsafe
# time is *during* an unzip -- we don't want to leave a
# partially-unzipped corpus in place because we wouldn't notice
# it. But if we had the exact total size of the unzipped corpus,
# then that would be fine. Then we could abort anytime we want!
# So this is really what we should do. That way the threaded
# downloader in the gui can just kill the download thread anytime
# it wants.
    def incr_download(self, info_or_id, download_dir=None, force=False):
        """
        Incrementally download *info_or_id* (a record, an identifier
        string, or a list of either), yielding ``DownloaderMessage``
        objects that report progress as work proceeds.
        """
        # If they didn't specify a download_dir, then use the default one.
        if download_dir is None:
            download_dir = self._download_dir
            yield SelectDownloadDirMessage(download_dir)

        # If they gave us a list of ids, then download each one.
        if isinstance(info_or_id, (list, tuple)):
            for msg in self._download_list(info_or_id, download_dir, force):
                yield msg
            return

        # Look up the requested collection or package.
        try:
            info = self._info_or_id(info_or_id)
        except (IOError, ValueError) as e:
            yield ErrorMessage(None, 'Error loading %s: %s' % (info_or_id, e))
            return

        # Handle collections.
        if isinstance(info, Collection):
            yield StartCollectionMessage(info)
            # Recurse on the collection's children.
            for msg in self.incr_download(info.children, download_dir, force):
                yield msg
            yield FinishCollectionMessage(info)

        # Handle Packages (delegate to a helper function).
        else:
            for msg in self._download_package(info, download_dir, force):
                yield msg
def _num_packages(self, item):
if isinstance(item, Package):
return 1
else:
return len(item.packages)
    def _download_list(self, items, download_dir, force):
        """Download every item in *items*, rescaling each item's progress
        messages so the combined progress runs 0-100 overall."""
        # Look up the requested items.
        for i in range(len(items)):
            try:
                items[i] = self._info_or_id(items[i])
            except (IOError, ValueError) as e:
                yield ErrorMessage(items[i], e)
                return

        # Download each item, re-scaling their progress.
        num_packages = sum(self._num_packages(item) for item in items)
        progress = 0
        for i, item in enumerate(items):
            # Fraction of the total work this item represents.
            if isinstance(item, Package):
                delta = 1.0 / num_packages
            else:
                delta = len(item.packages) / num_packages
            for msg in self.incr_download(item, download_dir, force):
                if isinstance(msg, ProgressMessage):
                    yield ProgressMessage(progress + msg.progress * delta)
                else:
                    yield msg

            progress += 100 * delta
def _download_package(self, info, download_dir, force):
yield StartPackageMessage(info)
yield ProgressMessage(0)
# Do we already have the current version?
status = self.status(info, download_dir)
if not force and status == self.INSTALLED:
yield UpToDateMessage(info)
yield ProgressMessage(100)
yield FinishPackageMessage(info)
return
# Remove the package from our status cache
self._status_cache.pop(info.id, None)
# Check for (and remove) any old/stale version.
filepath = os.path.join(download_dir, info.filename)
if os.path.exists(filepath):
if status == self.STALE:
yield StaleMessage(info)
os.remove(filepath)
# Ensure the download_dir exists
if not os.path.exists(download_dir):
os.mkdir(download_dir)
if not os.path.exists(os.path.join(download_dir, info.subdir)):
os.mkdir(os.path.join(download_dir, info.subdir))
# Download the file. This will raise an IOError if the url
# is not found.
yield StartDownloadMessage(info)
yield ProgressMessage(5)
try:
infile = urlopen(info.url)
with open(filepath, 'wb') as outfile:
# print info.size
num_blocks = max(1, info.size / (1024 * 16))
for block in itertools.count():
s = infile.read(1024 * 16) # 16k blocks.
outfile.write(s)
if not s:
break
if block % 2 == 0: # how often?
yield ProgressMessage(min(80, 5 + 75 * (block / num_blocks)))
infile.close()
except IOError as e:
yield ErrorMessage(
info,
'Error downloading %r from <%s>:' '\n %s' % (info.id, info.url, e),
)
return
yield FinishDownloadMessage(info)
yield ProgressMessage(80)
# If it's a zipfile, uncompress it.
if info.filename.endswith('.zip'):
zipdir = os.path.join(download_dir, info.subdir)
# Unzip if we're unzipping by default; *or* if it's already
# been unzipped (presumably a previous version).
if info.unzip or os.path.exists(os.path.join(zipdir, info.id)):
yield StartUnzipMessage(info)
for msg in _unzip_iter(filepath, zipdir, verbose=False):
# Somewhat of a hack, but we need a proper package reference
msg.package = info
yield msg
yield FinishUnzipMessage(info)
yield FinishPackageMessage(info)
    def download(
        self,
        info_or_id=None,
        download_dir=None,
        quiet=False,
        force=False,
        prefix='[nltk_data] ',
        halt_on_error=True,
        raise_on_error=False,
        print_error_to=sys.stderr,
    ):
        """
        Download *info_or_id* (or run the interactive downloader when it
        is None), printing progress/errors with the given *prefix* to
        *print_error_to*.  Returns True on success, False on failure
        (unless *raise_on_error* makes errors raise ``ValueError``).
        """
        print_to = functools.partial(print, file=print_error_to)
        # If no info or id is given, then use the interactive shell.
        if info_or_id is None:
            # [xx] hmm -- changing self._download_dir here seems like
            # the wrong thing to do.  Maybe the _interactive_download
            # function should make a new copy of self to use?
            if download_dir is not None:
                self._download_dir = download_dir
            self._interactive_download()
            return True

        else:
            # Define a helper function for displaying output:
            def show(s, prefix2=''):
                print_to(
                    textwrap.fill(
                        s,
                        initial_indent=prefix + prefix2,
                        subsequent_indent=prefix + prefix2 + ' ' * 4,
                    )
                )

            for msg in self.incr_download(info_or_id, download_dir, force):
                # Error messages
                if isinstance(msg, ErrorMessage):
                    show(msg.message)
                    if raise_on_error:
                        raise ValueError(msg.message)
                    if halt_on_error:
                        return False
                    self._errors = True
                    if not quiet:
                        print_to("Error installing package. Retry? [n/y/e]")
                        choice = input().strip()
                        if choice in ['y', 'Y']:
                            # Retry this one package recursively.
                            if not self.download(
                                msg.package.id,
                                download_dir,
                                quiet,
                                force,
                                prefix,
                                halt_on_error,
                                raise_on_error,
                            ):
                                return False
                        elif choice in ['e', 'E']:
                            return False

                # All other messages
                if not quiet:
                    # Collection downloading messages:
                    if isinstance(msg, StartCollectionMessage):
                        show('Downloading collection %r' % msg.collection.id)
                        # NOTE(review): ' | ' adds 3 characters but the
                        # matching FinishCollectionMessage strips 4 via
                        # prefix[:-4] -- confirm the intended widths.
                        prefix += ' | '
                        print_to(prefix)
                    elif isinstance(msg, FinishCollectionMessage):
                        print_to(prefix)
                        prefix = prefix[:-4]
                        if self._errors:
                            show(
                                'Downloaded collection %r with errors'
                                % msg.collection.id
                            )
                        else:
                            show('Done downloading collection %s' % msg.collection.id)

                    # Package downloading messages:
                    elif isinstance(msg, StartPackageMessage):
                        show(
                            'Downloading package %s to %s...'
                            % (msg.package.id, download_dir)
                        )
                    elif isinstance(msg, UpToDateMessage):
                        show('Package %s is already up-to-date!' % msg.package.id, ' ')
                    # elif isinstance(msg, StaleMessage):
                    #    show('Package %s is out-of-date or corrupt' %
                    #         msg.package.id, ' ')
                    elif isinstance(msg, StartUnzipMessage):
                        show('Unzipping %s.' % msg.package.filename, ' ')

                    # Data directory message:
                    elif isinstance(msg, SelectDownloadDirMessage):
                        download_dir = msg.download_dir
            return True
    def is_stale(self, info_or_id, download_dir=None):
        """True if the given package/collection is out-of-date or corrupt."""
        return self.status(info_or_id, download_dir) == self.STALE

    def is_installed(self, info_or_id, download_dir=None):
        """True if the given package/collection is installed and up-to-date."""
        return self.status(info_or_id, download_dir) == self.INSTALLED

    def clear_status_cache(self, id=None):
        """Forget the cached status for *id*, or for everything if *id* is None."""
        if id is None:
            self._status_cache.clear()
        else:
            self._status_cache.pop(id, None)
    def status(self, info_or_id, download_dir=None):
        """
        Return a constant describing the status of the given package
        or collection.  Status can be one of ``INSTALLED``,
        ``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``.
        """
        if download_dir is None:
            download_dir = self._download_dir
        info = self._info_or_id(info_or_id)

        # Handle collections:
        if isinstance(info, Collection):
            # Derive the collection status from its member packages.
            pkg_status = [self.status(pkg.id) for pkg in info.packages]
            if self.STALE in pkg_status:
                return self.STALE
            elif self.PARTIAL in pkg_status:
                return self.PARTIAL
            elif self.INSTALLED in pkg_status and self.NOT_INSTALLED in pkg_status:
                return self.PARTIAL
            elif self.NOT_INSTALLED in pkg_status:
                return self.NOT_INSTALLED
            else:
                return self.INSTALLED

        # Handle packages:
        else:
            filepath = os.path.join(download_dir, info.filename)
            if download_dir != self._download_dir:
                # Statuses for non-default directories are never cached.
                return self._pkg_status(info, filepath)
            else:
                if info.id not in self._status_cache:
                    self._status_cache[info.id] = self._pkg_status(info, filepath)
                return self._status_cache[info.id]
def _pkg_status(self, info, filepath):
if not os.path.exists(filepath):
return self.NOT_INSTALLED
# Check if the file has the correct size.
try:
filestat = os.stat(filepath)
except OSError:
return self.NOT_INSTALLED
if filestat.st_size != int(info.size):
return self.STALE
# Check if the file's checksum matches
if md5_hexdigest(filepath) != info.checksum:
return self.STALE
# If it's a zipfile, and it's been at least partially
# unzipped, then check if it's been fully unzipped.
if filepath.endswith('.zip'):
unzipdir = filepath[:-4]
if not os.path.exists(unzipdir):
return self.INSTALLED # but not unzipped -- ok!
if not os.path.isdir(unzipdir):
return self.STALE
unzipped_size = sum(
os.stat(os.path.join(d, f)).st_size
for d, _, files in os.walk(unzipdir)
for f in files
)
if unzipped_size != info.unzipped_size:
return self.STALE
# Otherwise, everything looks good.
return self.INSTALLED
def update(self, quiet=False, prefix='[nltk_data] '):
"""
Re-download any packages whose status is STALE.
"""
self.clear_status_cache()
for pkg in self.packages():
if self.status(pkg) == self.STALE:
self.download(pkg, quiet=quiet, prefix=prefix)
# /////////////////////////////////////////////////////////////////
# Index
# /////////////////////////////////////////////////////////////////
def _update_index(self, url=None):
"""A helper function that ensures that self._index is
up-to-date. If the index is older than self.INDEX_TIMEOUT,
then download it again."""
# Check if the index is aleady up-to-date. If so, do nothing.
if not (
self._index is None
or url is not None
or time.time() - self._index_timestamp > self.INDEX_TIMEOUT
):
return
# If a URL was specified, then update our URL.
self._url = url or self._url
# Download the index file.
self._index = nltk.internals.ElementWrapper(
ElementTree.parse(urlopen(self._url)).getroot()
)
self._index_timestamp = time.time()
# Build a dictionary of packages.
packages = [Package.fromxml(p) for p in self._index.findall('packages/package')]
self._packages = dict((p.id, p) for p in packages)
# Build a dictionary of collections.
collections = [
Collection.fromxml(c) for c in self._index.findall('collections/collection')
]
self._collections = dict((c.id, c) for c in collections)
# Replace identifiers with actual children in collection.children.
for collection in self._collections.values():
for i, child_id in enumerate(collection.children):
if child_id in self._packages:
collection.children[i] = self._packages[child_id]
elif child_id in self._collections:
collection.children[i] = self._collections[child_id]
else:
print(
'removing collection member with no package: {}'.format(
child_id
)
)
del collection.children[i]
# Fill in collection.packages for each collection.
for collection in self._collections.values():
packages = {}
queue = [collection]
for child in queue:
if isinstance(child, Collection):
queue.extend(child.children)
elif isinstance(child, Package):
packages[child.id] = child
else:
pass
collection.packages = packages.values()
# Flush the status cache
self._status_cache.clear()
    def index(self):
        """
        Return the XML index describing the packages available from
        the data server.  If necessary, this index will be downloaded
        from the data server.
        """
        self._update_index()
        return self._index

    def info(self, id):
        """Return the ``Package`` or ``Collection`` record for the
        given item.

        :raises ValueError: if *id* is in neither dictionary.
        """
        self._update_index()
        if id in self._packages:
            return self._packages[id]
        if id in self._collections:
            return self._collections[id]
        raise ValueError('Package %r not found in index' % id)

    def xmlinfo(self, id):
        """Return the XML info record for the given item"""
        self._update_index()
        # Linear scan over the raw XML (unlike ``info``, which uses dicts).
        for package in self._index.findall('packages/package'):
            if package.get('id') == id:
                return package
        for collection in self._index.findall('collections/collection'):
            if collection.get('id') == id:
                return collection
        raise ValueError('Package %r not found in index' % id)
# /////////////////////////////////////////////////////////////////
# URL & Data Directory
# /////////////////////////////////////////////////////////////////
    def _get_url(self):
        """The URL for the data server's index file."""
        return self._url

    def _set_url(self, url):
        """
        Set a new URL for the data server. If we're unable to contact
        the given url, then the original url is kept.
        """
        original_url = self._url
        try:
            self._update_index(url)
        except:
            # Restore the previous URL on any failure; the bare except is
            # tolerable only because the exception is re-raised unchanged.
            self._url = original_url
            raise

    url = property(_get_url, _set_url)
    def default_download_dir(self):
        """
        Return the directory to which packages will be downloaded by
        default.  This value can be overridden using the constructor,
        or on a case-by-case basis using the ``download_dir`` argument when
        calling ``download()``.

        On Windows, the default download directory is
        ``PYTHONHOME/lib/nltk``, where *PYTHONHOME* is the
        directory containing Python, e.g. ``C:\\Python25``.

        On all other platforms, the default directory is the first of
        the following which exists or which can be created with write
        permission: ``/usr/share/nltk_data``, ``/usr/local/share/nltk_data``,
        ``/usr/lib/nltk_data``, ``/usr/local/lib/nltk_data``, ``~/nltk_data``.
        """
        # Check if we are on GAE where we cannot write into filesystem.
        if 'APPENGINE_RUNTIME' in os.environ:
            # NOTE(review): implicitly returns None here -- callers must
            # tolerate a missing download directory; confirm intended.
            return

        # Check if we have sufficient permissions to install in a
        # variety of system-wide locations.
        for nltkdir in nltk.data.path:
            if os.path.exists(nltkdir) and nltk.internals.is_writable(nltkdir):
                return nltkdir

        # On Windows, use %APPDATA%
        if sys.platform == 'win32' and 'APPDATA' in os.environ:
            homedir = os.environ['APPDATA']

        # Otherwise, install in the user's home directory.
        else:
            homedir = os.path.expanduser('~/')
            if homedir == '~/':
                raise ValueError("Could not find a default download directory")

        # append "nltk_data" to the home directory
        return os.path.join(homedir, 'nltk_data')
    def _get_download_dir(self):
        """
        The default directory to which packages will be downloaded.
        This defaults to the value returned by ``default_download_dir()``.

        To override this default on a case-by-case basis, use the
        ``download_dir`` argument when calling ``download()``.
        """
        return self._download_dir

    def _set_download_dir(self, download_dir):
        # Changing the target directory invalidates every cached status.
        self._download_dir = download_dir
        # Clear the status cache.
        self._status_cache.clear()

    download_dir = property(_get_download_dir, _set_download_dir)
    # /////////////////////////////////////////////////////////////////
    # Interactive Shell
    # /////////////////////////////////////////////////////////////////

    def _interactive_download(self):
        """Run the GUI downloader when Tk is usable, else the text shell."""
        # Try the GUI first; if that doesn't work, try the simple
        # interactive shell.
        if TKINTER:
            try:
                DownloaderGUI(self).mainloop()
            except TclError:
                # Tk is importable but no display is available.
                DownloaderShell(self).run()
        else:
            DownloaderShell(self).run()
class DownloaderShell(object):
    """Simple text-based interactive interface to a ``Downloader``."""

    def __init__(self, dataserver):
        # The Downloader instance this shell drives.
        self._ds = dataserver
def _simple_interactive_menu(self, *options):
print('-' * 75)
spc = (68 - sum(len(o) for o in options)) // (len(options) - 1) * ' '
print(' ' + spc.join(options))
# w = 76/len(options)
# fmt = ' ' + ('%-'+str(w)+'s')*(len(options)-1) + '%s'
# print fmt % options
print('-' * 75)
    def run(self):
        """Main interactive loop: read a command, dispatch, repeat until quit."""
        print('NLTK Downloader')
        while True:
            self._simple_interactive_menu(
                'd) Download',
                'l) List',
                ' u) Update',
                'c) Config',
                'h) Help',
                'q) Quit',
            )
            user_input = input('Downloader> ').strip()
            if not user_input:
                print()
                continue
            # First whitespace-separated token selects the command.
            command = user_input.lower().split()[0]
            args = user_input.split()[1:]
            try:
                if command == 'l':
                    print()
                    self._ds.list(self._ds.download_dir, header=False, more_prompt=True)
                elif command == 'h':
                    self._simple_interactive_help()
                elif command == 'c':
                    self._simple_interactive_config()
                elif command in ('q', 'x'):
                    return
                elif command == 'd':
                    self._simple_interactive_download(args)
                elif command == 'u':
                    self._simple_interactive_update()
                else:
                    print('Command %r unrecognized' % user_input)
            except HTTPError as e:
                print('Error reading from server: %s' % e)
            except URLError as e:
                print('Error connecting to server: %s' % e.reason)
            # try checking if user_input is a package name, &
            # downloading it?
            print()
    def _simple_interactive_download(self, args):
        """Download the packages named in *args*, or prompt interactively
        for identifiers when *args* is empty."""
        if args:
            for arg in args:
                try:
                    self._ds.download(arg, prefix=' ')
                except (IOError, ValueError) as e:
                    print(e)
        else:
            while True:
                print()
                print('Download which package (l=list; x=cancel)?')
                user_input = input(' Identifier> ')
                if user_input.lower() == 'l':
                    # Show only not-yet-installed entries, then re-prompt.
                    self._ds.list(
                        self._ds.download_dir,
                        header=False,
                        more_prompt=True,
                        skip_installed=True,
                    )
                    continue
                elif user_input.lower() in ('x', 'q', ''):
                    return
                elif user_input:
                    # Each whitespace-separated token is a package id.
                    for id in user_input.split():
                        try:
                            self._ds.download(id, prefix=' ')
                        except (IOError, ValueError) as e:
                            print(e)
                    break
def _simple_interactive_update(self):
while True:
stale_packages = []
stale = partial = False
for info in sorted(getattr(self._ds, 'packages')(), key=str):
if self._ds.status(info) == self._ds.STALE:
stale_packages.append((info.id, info.name))
print()
if stale_packages:
print('Will update following packages (o=ok; x=cancel)')
for pid, pname in stale_packages:
name = textwrap.fill(
'-' * 27 + (pname), 75, subsequent_indent=27 * ' '
)[27:]
print(' [ ] %s %s' % (pid.ljust(20, '.'), name))
print()
user_input = input(' Identifier> ')
if user_input.lower() == 'o':
for pid, pname in stale_packages:
try:
self._ds.download(pid, prefix=' ')
except (IOError, ValueError) as e:
print(e)
break
elif user_input.lower() in ('x', 'q', ''):
return
else:
print('Nothing to update.')
return
def _simple_interactive_help(self):
print()
print('Commands:')
print(
' d) Download a package or collection u) Update out of date packages'
)
print(' l) List packages & collections h) Help')
print(' c) View & Modify Configuration q) Quit')
    def _show_config(self):
        """Print the current server URL, index counts, and data directory."""
        print()
        print('Data Server:')
        print(' - URL: <%s>' % self._ds.url)
        print((' - %d Package Collections Available' % len(self._ds.collections())))
        print((' - %d Individual Packages Available' % len(self._ds.packages())))
        print()
        print('Local Machine:')
        print(' - Data directory: %s' % self._ds.download_dir)
def _simple_interactive_config(self):
    """Interactive configuration sub-menu.

    's' re-prints the configuration, 'u' sets the server index URL,
    'd' sets the download directory, 'm' returns to the main menu.
    """
    self._show_config()
    while True:
        print()
        self._simple_interactive_menu(
            's) Show Config', 'u) Set Server URL', 'd) Set Data Dir', 'm) Main Menu'
        )
        user_input = input('Config> ').strip().lower()
        if user_input == 's':
            self._show_config()
        elif user_input == 'd':
            new_dl_dir = input(' New Directory> ').strip()
            if new_dl_dir in ('', 'x', 'q', 'X', 'Q'):
                print(' Cancelled!')
            elif os.path.isdir(new_dl_dir):
                self._ds.download_dir = new_dl_dir
            else:
                print(('Directory %r not found! Create it first.' % new_dl_dir))
        elif user_input == 'u':
            new_url = input(' New URL> ').strip()
            if new_url in ('', 'x', 'q', 'X', 'Q'):
                print(' Cancelled!')
            else:
                # Default to http:// when the user omits the scheme.
                if not new_url.startswith(('http://', 'https://')):
                    new_url = 'http://' + new_url
                try:
                    self._ds.url = new_url
                except Exception as e:
                    print('Error reading <%r>:\n %s' % (new_url, e))
        elif user_input == 'm':
            break
class DownloaderGUI(object):
"""
Graphical interface for downloading packages from the NLTK data
server.
"""
# /////////////////////////////////////////////////////////////////
# Column Configuration
# /////////////////////////////////////////////////////////////////
COLUMNS = [
    '',
    'Identifier',
    'Name',
    'Size',
    'Status',
    'Unzipped Size',
    'Copyright',
    'Contact',
    'License',
    'Author',
    'Subdir',
    'Checksum',
]
"""A list of the names of columns. This controls the order in
which the columns will appear. If this is edited, then
``_package_to_columns()`` may need to be edited to match."""
COLUMN_WEIGHTS = {'': 0, 'Name': 5, 'Size': 0, 'Status': 0}
"""A dictionary specifying how columns should be resized when the
table is resized. Columns with weight 0 will not be resized at
all; and columns with high weight will be resized more.
Default weight (for columns not explicitly listed) is 1."""
COLUMN_WIDTHS = {
    '': 1,
    'Identifier': 20,
    'Name': 45,
    'Size': 10,
    'Unzipped Size': 10,
    'Status': 12,
}
"""A dictionary specifying how wide each column should be, in
characters. The default width (for columns not explicitly
listed) is specified by ``DEFAULT_COLUMN_WIDTH``."""
DEFAULT_COLUMN_WIDTH = 30
"""The default width for columns that are not explicitly listed
in ``COLUMN_WIDTHS``."""
INITIAL_COLUMNS = ['', 'Identifier', 'Name', 'Size', 'Status']
"""The set of columns that should be displayed by default."""
# Perform a few import-time sanity checks to make sure that the
# column configuration variables are defined consistently:
for c in COLUMN_WEIGHTS:
    assert c in COLUMNS
for c in COLUMN_WIDTHS:
    assert c in COLUMNS
for c in INITIAL_COLUMNS:
    assert c in COLUMNS
# /////////////////////////////////////////////////////////////////
# Color Configuration
# /////////////////////////////////////////////////////////////////
# (foreground, background) pair for the window backdrop and labels.
_BACKDROP_COLOR = ('#000', '#ccc')
# (background, selected-background) per package status; see _color_table().
_ROW_COLOR = {
    Downloader.INSTALLED: ('#afa', '#080'),
    Downloader.PARTIAL: ('#ffa', '#880'),
    Downloader.STALE: ('#faa', '#800'),
    Downloader.NOT_INSTALLED: ('#fff', '#888'),
}
# (foreground, background) for the 'marked' column cells.
_MARK_COLOR = ('#000', '#ccc')
# _FRONT_TAB_COLOR = ('#ccf', '#008')
# _BACK_TAB_COLOR = ('#88a', '#448')
_FRONT_TAB_COLOR = ('#fff', '#45c')
_BACK_TAB_COLOR = ('#aaa', '#67a')
_PROGRESS_COLOR = ('#f00', '#aaa')
_TAB_FONT = 'helvetica -16 bold'
# /////////////////////////////////////////////////////////////////
# Constructor
# /////////////////////////////////////////////////////////////////
def __init__(self, dataserver, use_threads=True):
    """Build the downloader GUI.

    :param dataserver: the data-server object (``Downloader``) to
        browse and download from.
    :param use_threads: if true, downloads run in a background thread
        so the GUI stays responsive.
    """
    self._ds = dataserver
    self._use_threads = use_threads
    # For the threaded downloader:
    self._download_lock = threading.Lock()
    self._download_msg_queue = []
    self._download_abort_queue = []
    self._downloading = False
    # For tkinter after callbacks:
    self._afterid = {}
    # A message log.
    self._log_messages = []
    self._log_indent = 0
    self._log('NLTK Downloader Started!')
    # Create the main window.
    top = self.top = Tk()
    top.geometry('+50+50')
    top.title('NLTK Downloader')
    top.configure(background=self._BACKDROP_COLOR[1])
    # Set up some bindings now, in case anything goes wrong.
    top.bind('<Control-q>', self.destroy)
    top.bind('<Control-x>', self.destroy)
    self._destroyed = False
    self._column_vars = {}
    # Initialize the GUI.
    self._init_widgets()
    self._init_menu()
    try:
        self._fill_table()
    except HTTPError as e:
        showerror('Error reading from server', e)
    except URLError as e:
        showerror('Error connecting to server', e.reason)
    self._show_info()
    self._select_columns()
    self._table.select(0)
    # Make sure we get notified when we're destroyed, so we can
    # cancel any download in progress.
    self._table.bind('<Destroy>', self._destroy)
def _log(self, msg):
    """Append *msg* to the in-memory log, stamped with the current time
    and indented to the current collection-nesting depth."""
    stamp = time.ctime()
    indent = ' | ' * self._log_indent
    self._log_messages.append('%s %s%s' % (stamp, indent, msg))
# /////////////////////////////////////////////////////////////////
# Internals
# /////////////////////////////////////////////////////////////////
def _init_widgets(self):
    """Create and lay out all widgets: tabs, table, info entries,
    buttons, and the progress bar."""
    # Create the top-level frame structures
    f1 = Frame(self.top, relief='raised', border=2, padx=8, pady=0)
    # Fixed typo: was pack(sid='top', ...), which only worked because Tcl
    # accepts unambiguous option abbreviations.
    f1.pack(side='top', expand=True, fill='both')
    f1.grid_rowconfigure(2, weight=1)
    f1.grid_columnconfigure(0, weight=1)
    Frame(f1, height=8).grid(column=0, row=0)  # spacer
    tabframe = Frame(f1)
    tabframe.grid(column=0, row=1, sticky='news')
    tableframe = Frame(f1)
    tableframe.grid(column=0, row=2, sticky='news')
    buttonframe = Frame(f1)
    buttonframe.grid(column=0, row=3, sticky='news')
    Frame(f1, height=8).grid(column=0, row=4)  # spacer
    infoframe = Frame(f1)
    infoframe.grid(column=0, row=5, sticky='news')
    Frame(f1, height=8).grid(column=0, row=6)  # spacer
    progressframe = Frame(
        self.top, padx=3, pady=3, background=self._BACKDROP_COLOR[1]
    )
    progressframe.pack(side='bottom', fill='x')
    self.top['border'] = 0
    self.top['highlightthickness'] = 0
    # Create the tabs
    self._tab_names = ['Collections', 'Corpora', 'Models', 'All Packages']
    self._tabs = {}
    for i, tab in enumerate(self._tab_names):
        label = Label(tabframe, text=tab, font=self._TAB_FONT)
        label.pack(side='left', padx=((i + 1) % 2) * 10)
        label.bind('<Button-1>', self._select_tab)
        self._tabs[tab.lower()] = label
    # Create the table.
    column_weights = [self.COLUMN_WEIGHTS.get(column, 1) for column in self.COLUMNS]
    self._table = Table(
        tableframe,
        self.COLUMNS,
        column_weights=column_weights,
        highlightthickness=0,
        listbox_height=16,
        reprfunc=self._table_reprfunc,
    )
    self._table.columnconfig(0, foreground=self._MARK_COLOR[0])  # marked
    for i, column in enumerate(self.COLUMNS):
        width = self.COLUMN_WIDTHS.get(column, self.DEFAULT_COLUMN_WIDTH)
        self._table.columnconfig(i, width=width)
    self._table.pack(expand=True, fill='both')
    self._table.focus()
    self._table.bind_to_listboxes('<Double-Button-1>', self._download)
    self._table.bind('<space>', self._table_mark)
    self._table.bind('<Return>', self._download)
    self._table.bind('<Left>', self._prev_tab)
    self._table.bind('<Right>', self._next_tab)
    self._table.bind('<Control-a>', self._mark_all)
    # Create entry boxes for URL & download_dir
    infoframe.grid_columnconfigure(1, weight=1)
    info = [
        ('url', 'Server Index:', self._set_url),
        ('download_dir', 'Download Directory:', self._set_download_dir),
    ]
    self._info = {}
    for (i, (key, label, callback)) in enumerate(info):
        Label(infoframe, text=label).grid(column=0, row=i, sticky='e')
        entry = Entry(
            infoframe, font='courier', relief='groove', disabledforeground='black'
        )
        self._info[key] = (entry, callback)
        entry.bind('<Return>', self._info_save)
        entry.bind('<Button-1>', lambda e, key=key: self._info_edit(key))
        entry.grid(column=1, row=i, sticky='ew')
    # If the user edits url or download_dir, and then clicks outside
    # the entry box, then save their results.
    self.top.bind('<Button-1>', self._info_save)
    # Create Download & Refresh buttons.
    self._download_button = Button(
        buttonframe, text='Download', command=self._download, width=8
    )
    self._download_button.pack(side='left')
    self._refresh_button = Button(
        buttonframe, text='Refresh', command=self._refresh, width=8
    )
    self._refresh_button.pack(side='right')
    # Create Progress bar
    self._progresslabel = Label(
        progressframe,
        text='',
        foreground=self._BACKDROP_COLOR[0],
        background=self._BACKDROP_COLOR[1],
    )
    self._progressbar = Canvas(
        progressframe,
        width=200,
        height=16,
        background=self._PROGRESS_COLOR[1],
        relief='sunken',
        border=1,
    )
    self._init_progressbar()
    self._progressbar.pack(side='right')
    self._progresslabel.pack(side='left')
def _init_menu(self):
    """Build the File / View / Sort / Help menu bar."""
    menubar = Menu(self.top)
    filemenu = Menu(menubar, tearoff=0)
    filemenu.add_command(
        label='Download', underline=0, command=self._download, accelerator='Return'
    )
    filemenu.add_separator()
    filemenu.add_command(
        label='Change Server Index',
        underline=7,
        command=lambda: self._info_edit('url'),
    )
    filemenu.add_command(
        label='Change Download Directory',
        underline=0,
        command=lambda: self._info_edit('download_dir'),
    )
    filemenu.add_separator()
    filemenu.add_command(label='Show Log', underline=5, command=self._show_log)
    filemenu.add_separator()
    filemenu.add_command(
        label='Exit', underline=1, command=self.destroy, accelerator='Ctrl-x'
    )
    menubar.add_cascade(label='File', underline=0, menu=filemenu)
    # Create a menu to control which columns of the table are
    # shown. n.b.: we never hide the first two columns (mark and
    # identifier).
    viewmenu = Menu(menubar, tearoff=0)
    for column in self._table.column_names[2:]:
        var = IntVar(self.top)
        assert column not in self._column_vars
        self._column_vars[column] = var
        if column in self.INITIAL_COLUMNS:
            var.set(1)
        viewmenu.add_checkbutton(
            label=column, underline=0, variable=var, command=self._select_columns
        )
    menubar.add_cascade(label='View', underline=0, menu=viewmenu)
    # Create a sort menu
    # [xx] this should be selectbuttons; and it should include
    # reversed sorts as options.
    sortmenu = Menu(menubar, tearoff=0)
    for column in self._table.column_names[1:]:
        sortmenu.add_command(
            label='Sort by %s' % column,
            command=(lambda c=column: self._table.sort_by(c, 'ascending')),
        )
    sortmenu.add_separator()
    # sortmenu.add_command(label='Descending Sort:')
    for column in self._table.column_names[1:]:
        sortmenu.add_command(
            label='Reverse sort by %s' % column,
            command=(lambda c=column: self._table.sort_by(c, 'descending')),
        )
    menubar.add_cascade(label='Sort', underline=0, menu=sortmenu)
    helpmenu = Menu(menubar, tearoff=0)
    helpmenu.add_command(label='About', underline=0, command=self.about)
    helpmenu.add_command(
        label='Instructions', underline=0, command=self.help, accelerator='F1'
    )
    menubar.add_cascade(label='Help', underline=0, menu=helpmenu)
    self.top.bind('<F1>', self.help)
    self.top.config(menu=menubar)
def _select_columns(self):
    """Show or hide table columns to match the View-menu checkboxes."""
    for column, var in self._column_vars.items():
        action = self._table.show_column if var.get() else self._table.hide_column
        action(column)
def _refresh(self):
    """Clear the install-status cache and re-populate the table from
    the server, showing an error dialog on network failure."""
    self._ds.clear_status_cache()
    try:
        self._fill_table()
    except HTTPError as e:
        showerror('Error reading from server', e)
    except URLError as e:
        showerror('Error connecting to server', e.reason)
    self._table.select(0)
def _info_edit(self, info_key):
    """Make the entry box for *info_key* ('url' or 'download_dir')
    editable and give it keyboard focus."""
    # Commit any edit already in progress before starting a new one.
    self._info_save()
    entry = self._info[info_key][0]
    entry['state'] = 'normal'
    entry['relief'] = 'sunken'
    entry.focus()
def _info_save(self, e=None):
    """Commit any in-progress edit of the url/download_dir entries.

    Bound to <Return> in the entries and to <Button-1> anywhere in the
    window.  An entry that triggered a non-Return event keeps focus;
    otherwise each editable entry is disabled and its value handed to
    the registered setter callback, and focus returns to the table.
    """
    focus = self._table
    for entry, callback in self._info.values():
        if entry['state'] == 'disabled':
            continue
        if e is not None and e.widget is entry and e.keysym != 'Return':
            focus = entry
        else:
            # Lock the entry again and apply its new value.
            entry['state'] = 'disabled'
            entry['relief'] = 'groove'
            callback(entry.get())
    focus.focus()
def _table_reprfunc(self, row, col, val):
    """Format a table cell for display; '...Size' columns get a
    human-readable KB/MB/GB suffix."""
    if self._table.column_names[col].endswith('Size'):
        if isinstance(val, string_types):
            return ' %s' % val
        if val < 1024 ** 2:
            return ' %.1f KB' % (val / 1024.0 ** 1)
        if val < 1024 ** 3:
            return ' %.1f MB' % (val / 1024.0 ** 2)
        return ' %.1f GB' % (val / 1024.0 ** 3)
    return str(val) if col in (0, '') else ' %s' % val
def _set_url(self, url):
    """Setter callback for the Server Index entry: repoint the data
    server at *url* and reload the table; dialog on IOError."""
    if url == self._ds.url:
        return
    try:
        self._ds.url = url
        self._fill_table()
    except IOError as e:
        showerror('Error Setting Server Index', str(e))
    self._show_info()
def _set_download_dir(self, download_dir):
    """Setter callback for the Download Directory entry: switch the
    data server's target directory and re-check what's installed."""
    if self._ds.download_dir == download_dir:
        return
    # check if the dir exists, and if not, ask if we should create it?
    # Clear our status cache, & re-check what's installed
    self._ds.download_dir = download_dir
    try:
        self._fill_table()
    except HTTPError as e:
        showerror('Error reading from server', e)
    except URLError as e:
        showerror('Error connecting to server', e.reason)
    self._show_info()
def _show_info(self):
    """Refresh the url / download-dir entry boxes from the data server.

    The entries are temporarily enabled so their contents can be
    replaced, then disabled again.  (Removed a stray debug ``print``
    of the server URL that cluttered stdout on every refresh.)
    """
    for entry, cb in self._info.values():
        entry['state'] = 'normal'
        entry.delete(0, 'end')
    self._info['url'][0].insert(0, self._ds.url)
    self._info['download_dir'][0].insert(0, self._ds.download_dir)
    for entry, cb in self._info.values():
        entry['state'] = 'disabled'
def _prev_tab(self, *e):
    """Switch to the tab left of the current one (bound to <Left>);
    no-op when already on the leftmost tab."""
    for i, tab in enumerate(self._tab_names):
        if tab.lower() == self._tab and i > 0:
            self._tab = self._tab_names[i - 1].lower()
            try:
                return self._fill_table()
            except HTTPError as e:
                showerror('Error reading from server', e)
            except URLError as e:
                showerror('Error connecting to server', e.reason)
def _next_tab(self, *e):
    """Switch to the tab right of the current one (bound to <Right>);
    no-op when already on the rightmost tab."""
    for i, tab in enumerate(self._tab_names):
        if tab.lower() == self._tab and i < (len(self._tabs) - 1):
            self._tab = self._tab_names[i + 1].lower()
            try:
                return self._fill_table()
            except HTTPError as e:
                showerror('Error reading from server', e)
            except URLError as e:
                showerror('Error connecting to server', e.reason)
def _select_tab(self, event):
    """Mouse handler for the tab labels: activate the clicked tab and
    reload the table for it."""
    self._tab = event.widget['text'].lower()
    try:
        self._fill_table()
    except HTTPError as e:
        showerror('Error reading from server', e)
    except URLError as e:
        showerror('Error connecting to server', e.reason)
# Name of the currently-selected tab (lowercased; a key of self._tabs).
_tab = 'collections'
# _tab = 'corpora'
# NOTE(review): _rows appears unused in this class -- confirm before removing.
_rows = None
def _fill_table(self):
    """Repopulate the table with the items for the current tab
    (collections / corpora / models / all packages), re-highlight the
    tab labels, sort by Identifier, and restore the selection."""
    selected_row = self._table.selected_row()
    self._table.clear()
    if self._tab == 'all packages':
        items = self._ds.packages()
    elif self._tab == 'corpora':
        items = self._ds.corpora()
    elif self._tab == 'models':
        items = self._ds.models()
    elif self._tab == 'collections':
        items = self._ds.collections()
    else:
        assert 0, 'bad tab value %r' % self._tab
    rows = [self._package_to_columns(item) for item in items]
    self._table.extend(rows)
    # Highlight the active tab.
    for tab, label in self._tabs.items():
        if tab == self._tab:
            label.configure(
                foreground=self._FRONT_TAB_COLOR[0],
                background=self._FRONT_TAB_COLOR[1],
            )
        else:
            label.configure(
                foreground=self._BACK_TAB_COLOR[0],
                background=self._BACK_TAB_COLOR[1],
            )
    self._table.sort_by('Identifier', order='ascending')
    self._color_table()
    self._table.select(selected_row)
    # This is a hack, because the scrollbar isn't updating its
    # position right -- I'm not sure what the underlying cause is
    # though. (This is on OS X w/ python 2.5) The length of
    # delay that's necessary seems to depend on how fast the
    # comptuer is. :-/
    self.top.after(150, self._table._scrollbar.set, *self._table._mlb.yview())
    self.top.after(300, self._table._scrollbar.set, *self._table._mlb.yview())
def _update_table_status(self):
    """Re-query the install status of every row and re-color the table."""
    for idx in range(len(self._table)):
        identifier = self._table[idx, 'Identifier']
        self._table[idx, 'Status'] = self._ds.status(identifier)
    self._color_table()
def _download(self, *e):
    """Download every marked package, or the selected row when nothing
    is marked.  Delegates to the threaded downloader when enabled."""
    # If we're using threads, then delegate to the threaded
    # downloader instead.
    if self._use_threads:
        return self._download_threaded(*e)
    # Rows whose mark column is non-empty are queued for download.
    marked = [
        self._table[row, 'Identifier']
        for row in range(len(self._table))
        if self._table[row, 0] != ''
    ]
    selection = self._table.selected_row()
    if not marked and selection is not None:
        marked = [self._table[selection, 'Identifier']]
    download_iter = self._ds.incr_download(marked, self._ds.download_dir)
    self._log_indent = 0
    self._download_cb(download_iter, marked)
# Delay (ms) between steps of the non-threaded download callback loop.
_DL_DELAY = 10
def _download_cb(self, download_iter, ids):
    """Consume one message from *download_iter*, update the progress
    label/bar accordingly, and reschedule itself via ``after`` until
    the iterator is exhausted (non-threaded download path)."""
    try:
        msg = next(download_iter)
    except StopIteration:
        # self._fill_table(sort=False)
        self._update_table_status()
        afterid = self.top.after(10, self._show_progress, 0)
        self._afterid['_download_cb'] = afterid
        return

    def show(s):
        # Mirror each progress string to the label and the log.
        self._progresslabel['text'] = s
        self._log(s)

    if isinstance(msg, ProgressMessage):
        self._show_progress(msg.progress)
    elif isinstance(msg, ErrorMessage):
        show(msg.message)
        if msg.package is not None:
            self._select(msg.package.id)
        self._show_progress(None)
        return  # halt progress.
    elif isinstance(msg, StartCollectionMessage):
        show('Downloading collection %s' % msg.collection.id)
        self._log_indent += 1
    elif isinstance(msg, StartPackageMessage):
        show('Downloading package %s' % msg.package.id)
    elif isinstance(msg, UpToDateMessage):
        show('Package %s is up-to-date!' % msg.package.id)
    # elif isinstance(msg, StaleMessage):
    # show('Package %s is out-of-date or corrupt' % msg.package.id)
    elif isinstance(msg, FinishDownloadMessage):
        show('Finished downloading %r.' % msg.package.id)
    elif isinstance(msg, StartUnzipMessage):
        show('Unzipping %s' % msg.package.filename)
    elif isinstance(msg, FinishCollectionMessage):
        self._log_indent -= 1
        show('Finished downloading collection %r.' % msg.collection.id)
        self._clear_mark(msg.collection.id)
    elif isinstance(msg, FinishPackageMessage):
        self._clear_mark(msg.package.id)
    # Reschedule ourselves to pull the next message.
    afterid = self.top.after(self._DL_DELAY, self._download_cb, download_iter, ids)
    self._afterid['_download_cb'] = afterid
def _select(self, id):
    """Select the first table row whose Identifier equals *id*."""
    for idx in range(len(self._table)):
        if self._table[idx, 'Identifier'] == id:
            self._table.select(idx)
            break
def _color_table(self):
    """Color each row according to its package status and repaint the
    mark column with the mark colors."""
    # Color rows according to status.
    for row in range(len(self._table)):
        # _ROW_COLOR maps status -> (background, selected-background).
        bg, sbg = self._ROW_COLOR[self._table[row, 'Status']]
        fg, sfg = ('black', 'white')
        self._table.rowconfig(
            row,
            foreground=fg,
            selectforeground=sfg,
            background=bg,
            selectbackground=sbg,
        )
        # Color the marked column
        self._table.itemconfigure(
            row, 0, foreground=self._MARK_COLOR[0], background=self._MARK_COLOR[1]
        )
def _clear_mark(self, id):
    """Remove the download mark from every row whose Identifier is *id*."""
    for idx in range(len(self._table)):
        if self._table[idx, 'Identifier'] == id:
            self._table[idx, 0] = ''
def _mark_all(self, *e):
    """Mark every table row for download (bound to <Control-a>)."""
    for idx in range(len(self._table)):
        self._table[idx, 0] = 'X'
def _table_mark(self, *e):
    """Toggle the download mark on the selected row, then advance the
    selection by one (bound to <space>)."""
    selection = self._table.selected_row()
    if selection >= 0:
        if self._table[selection][0] != '':
            self._table[selection, 0] = ''
        else:
            self._table[selection, 0] = 'X'
    self._table.select(delta=1)
def _show_log(self):
    """Open a window showing all accumulated log messages."""
    ShowText(self.top, 'NLTK Downloader Log', '\n'.join(self._log_messages))
def _package_to_columns(self, pkg):
    """
    Given a package, return a list of values describing that
    package, one for each column in ``self.COLUMNS``.
    """
    row = []
    for column_index, column_name in enumerate(self.COLUMNS):
        if column_index == 0:  # Mark:
            row.append('')
        elif column_name == 'Identifier':
            row.append(pkg.id)
        elif column_name == 'Status':
            row.append(self._ds.status(pkg))
        else:
            # Remaining columns map to package attributes by name,
            # e.g. 'Unzipped Size' -> pkg.unzipped_size.
            attr = column_name.lower().replace(' ', '_')
            row.append(getattr(pkg, attr, 'n/a'))
    return row
# /////////////////////////////////////////////////////////////////
# External Interface
# /////////////////////////////////////////////////////////////////
def destroy(self, *e):
    """Tear down the main window (safe to call more than once)."""
    if not self._destroyed:
        self.top.destroy()
        self._destroyed = True
def _destroy(self, *e):
    """<Destroy> handler: cancel pending ``after`` callbacks and any
    in-progress download, then drop Tkinter variable references."""
    if self.top is not None:
        for afterid in self._afterid.values():
            self.top.after_cancel(afterid)
        # Abort any download in progress.
        if self._downloading and self._use_threads:
            self._abort_download()
        # Make sure the garbage collector destroys these now;
        # otherwise, they may get destroyed when we're not in the main
        # thread, which would make Tkinter unhappy.
        self._column_vars.clear()
def mainloop(self, *args, **kwargs):
    """Enter the Tkinter main event loop; returns when the window closes."""
    self.top.mainloop(*args, **kwargs)
# /////////////////////////////////////////////////////////////////
# HELP
# /////////////////////////////////////////////////////////////////
# Text shown by the Help menu / F1 key (see help() below).
HELP = textwrap.dedent(
    """\
This tool can be used to download a variety of corpora and models
that can be used with NLTK. Each corpus or model is distributed
in a single zip file, known as a \"package file.\" You can
download packages individually, or you can download pre-defined
collections of packages.
When you download a package, it will be saved to the \"download
directory.\" A default download directory is chosen when you run
the downloader; but you may also select a different download
directory. On Windows, the default download directory is
\"package.\"
The NLTK downloader can be used to download a variety of corpora,
models, and other data packages.
Keyboard shortcuts::
[return]\t Download
[up]\t Select previous package
[down]\t Select next package
[left]\t Select previous tab
[right]\t Select next tab
"""
)
def help(self, *e):
    """Display the HELP text in a ShowText window (bound to F1)."""
    # The default font's not very legible; try using 'fixed' instead.
    try:
        ShowText(
            self.top,
            'Help: NLTK Downloader',  # fixed typo: was 'Dowloader'
            self.HELP.strip(),
            width=75,
            font='fixed',
        )
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are not swallowed; falls back to the default font.
    except Exception:
        ShowText(self.top, 'Help: NLTK Downloader', self.HELP.strip(), width=75)
def about(self, *e):
    """Show the About dialog, preferring a native message box and
    falling back to a ShowText window."""
    ABOUT = "NLTK Downloader\n" + "Written by Edward Loper"
    TITLE = 'About: NLTK Downloader'
    try:
        from six.moves.tkinter_messagebox import Message

        Message(message=ABOUT, title=TITLE).show()
    except ImportError:
        ShowText(self.top, TITLE, ABOUT)
# /////////////////////////////////////////////////////////////////
# Progress Bar
# /////////////////////////////////////////////////////////////////
# Pixel width of each diagonal stripe in the animated 'busy' gradient.
_gradient_width = 5
def _init_progressbar(self):
    """Draw the progress bar canvas: a hidden diagonal-stripe gradient
    (animated while downloading) plus a red rectangle for progress."""
    c = self._progressbar
    width, height = int(c['width']), int(c['height'])
    for i in range(0, (int(c['width']) * 2) // self._gradient_width):
        c.create_line(
            i * self._gradient_width + 20,
            -20,
            i * self._gradient_width - height - 20,
            height + 20,
            width=self._gradient_width,
            fill='#%02x0000' % (80 + abs(i % 6 - 3) * 12),
        )
    c.addtag_all('gradient')
    c.itemconfig('gradient', state='hidden')
    # This is used to display progress
    c.addtag_withtag(
        'redbox', c.create_rectangle(0, 0, 0, 0, fill=self._PROGRESS_COLOR[0])
    )
def _show_progress(self, percent):
    """Resize the red progress rectangle to *percent*; hide everything
    when *percent* is None."""
    canvas = self._progressbar
    if percent is None:
        canvas.coords('redbox', 0, 0, 0, 0)
        canvas.itemconfig('gradient', state='hidden')
        return
    width, height = int(canvas['width']), int(canvas['height'])
    filled = percent * int(width) // 100 + 1
    canvas.coords('redbox', 0, 0, filled, height + 1)
def _progress_alive(self):
    """Animate the gradient stripes while a download is in progress,
    rescheduling itself every 200 ms."""
    c = self._progressbar
    if not self._downloading:
        c.itemconfig('gradient', state='hidden')
    else:
        c.itemconfig('gradient', state='normal')
        x1, y1, x2, y2 = c.bbox('gradient')
        # Slide the stripes left, wrapping them back once off-canvas.
        if x1 <= -100:
            c.move('gradient', (self._gradient_width * 6) - 4, 0)
        else:
            c.move('gradient', -4, 0)
        afterid = self.top.after(200, self._progress_alive)
        self._afterid['_progress_alive'] = afterid
# /////////////////////////////////////////////////////////////////
# Threaded downloader
# /////////////////////////////////////////////////////////////////
def _download_threaded(self, *e):
    """Start downloading the marked (or selected) packages in a
    background thread; a second invocation while downloading aborts."""
    # If the user tries to start a new download while we're already
    # downloading something, then abort the current download instead.
    if self._downloading:
        self._abort_download()
        return
    # Change the 'download' button to an 'abort' button.
    self._download_button['text'] = 'Cancel'
    marked = [
        self._table[row, 'Identifier']
        for row in range(len(self._table))
        if self._table[row, 0] != ''
    ]
    selection = self._table.selected_row()
    if not marked and selection is not None:
        marked = [self._table[selection, 'Identifier']]
    # Create a new data server object for the download operation,
    # just in case the user modifies our data server during the
    # download (e.g., clicking 'refresh' or editing the index url).
    ds = Downloader(self._ds.url, self._ds.download_dir)
    # Start downloading in a separate thread.
    assert self._download_msg_queue == []
    assert self._download_abort_queue == []
    self._DownloadThread(
        ds,
        marked,
        self._download_lock,
        self._download_msg_queue,
        self._download_abort_queue,
    ).start()
    # Monitor the download message queue & display its progress.
    self._log_indent = 0
    self._downloading = True
    self._monitor_message_queue()
    # Display an indication that we're still alive and well by
    # cycling the progress bar.
    self._progress_alive()
def _abort_download(self):
    """Ask the download thread to stop at its next safe point."""
    if self._downloading:
        with self._download_lock:
            self._download_abort_queue.append('abort')
class _DownloadThread(threading.Thread):
    """Worker thread that runs ``incr_download`` and forwards each
    progress message to the GUI through a lock-protected queue.

    Appending 'abort' to *abort* makes the thread stop after the
    current message; it acknowledges by queuing 'aborted'.  On normal
    completion it queues 'finished'.
    """

    def __init__(self, data_server, items, lock, message_queue, abort):
        self.data_server = data_server
        self.items = items
        self.lock = lock
        self.message_queue = message_queue
        self.abort = abort
        threading.Thread.__init__(self)

    def run(self):
        for msg in self.data_server.incr_download(self.items):
            self.lock.acquire()
            self.message_queue.append(msg)
            # Check if we've been told to kill ourselves:
            if self.abort:
                self.message_queue.append('aborted')
                self.lock.release()
                return
            self.lock.release()
        self.lock.acquire()
        self.message_queue.append('finished')
        self.lock.release()
# Interval (ms) between polls of the download thread's message queue.
_MONITOR_QUEUE_DELAY = 100
def _monitor_message_queue(self):
    """Poll the download thread's message queue and update the GUI,
    rescheduling itself every ``_MONITOR_QUEUE_DELAY`` ms."""

    def show(s):
        self._progresslabel['text'] = s
        self._log(s)

    # Try to acquire the lock WITHOUT blocking; if the download thread
    # holds it, poll again on the next tick.  (The previous code called
    # acquire() blocking, which always returns True, so the "busy"
    # check never worked and the GUI thread could stall.)
    if not self._download_lock.acquire(False):
        afterid = self.top.after(self._MONITOR_QUEUE_DELAY, self._monitor_message_queue)
        self._afterid['_monitor_message_queue'] = afterid
        return
    for msg in self._download_msg_queue:
        # Done downloading?
        if msg == 'finished' or msg == 'aborted':
            # self._fill_table(sort=False)
            self._update_table_status()
            self._downloading = False
            self._download_button['text'] = 'Download'
            del self._download_msg_queue[:]
            del self._download_abort_queue[:]
            self._download_lock.release()
            if msg == 'aborted':
                show('Download aborted!')
                self._show_progress(None)
            else:
                afterid = self.top.after(100, self._show_progress, None)
                self._afterid['_monitor_message_queue'] = afterid
            return
        # All other messages
        elif isinstance(msg, ProgressMessage):
            self._show_progress(msg.progress)
        elif isinstance(msg, ErrorMessage):
            show(msg.message)
            if msg.package is not None:
                self._select(msg.package.id)
            self._show_progress(None)
            self._downloading = False
            # Clean up and release the lock before halting.  (The
            # previous code returned while still holding the lock and
            # left the button saying 'Cancel', freezing later polls.)
            self._download_button['text'] = 'Download'
            del self._download_msg_queue[:]
            del self._download_abort_queue[:]
            self._download_lock.release()
            return  # halt progress.
        elif isinstance(msg, StartCollectionMessage):
            show('Downloading collection %r' % msg.collection.id)
            self._log_indent += 1
        elif isinstance(msg, StartPackageMessage):
            self._ds.clear_status_cache(msg.package.id)
            show('Downloading package %r' % msg.package.id)
        elif isinstance(msg, UpToDateMessage):
            show('Package %s is up-to-date!' % msg.package.id)
        # elif isinstance(msg, StaleMessage):
        # show('Package %s is out-of-date or corrupt; updating it' %
        # msg.package.id)
        elif isinstance(msg, FinishDownloadMessage):
            show('Finished downloading %r.' % msg.package.id)
        elif isinstance(msg, StartUnzipMessage):
            show('Unzipping %s' % msg.package.filename)
        elif isinstance(msg, FinishUnzipMessage):
            show('Finished installing %s' % msg.package.id)
        elif isinstance(msg, FinishCollectionMessage):
            self._log_indent -= 1
            show('Finished downloading collection %r.' % msg.collection.id)
            self._clear_mark(msg.collection.id)
        elif isinstance(msg, FinishPackageMessage):
            self._update_table_status()
            self._clear_mark(msg.package.id)
    # Let the user know when we're aborting a download (but
    # waiting for a good point to abort it, so we don't end up
    # with a partially unzipped package or anything like that).
    if self._download_abort_queue:
        self._progresslabel['text'] = 'Aborting download...'
    # Clear the message queue and then release the lock
    del self._download_msg_queue[:]
    self._download_lock.release()
    # Check the queue again after MONITOR_QUEUE_DELAY msec.
    afterid = self.top.after(self._MONITOR_QUEUE_DELAY, self._monitor_message_queue)
    self._afterid['_monitor_message_queue'] = afterid
######################################################################
# Helper Functions
######################################################################
# [xx] It may make sense to move these to nltk.internals.
def md5_hexdigest(file):
    """
    Calculate and return the MD5 checksum for a given file.
    ``file`` may either be a filename or an open stream.
    """
    # `string_types` comes from six at module level (py2/py3 compat).
    if isinstance(file, string_types):
        with open(file, 'rb') as infile:
            return _md5_hexdigest(infile)
    return _md5_hexdigest(file)
def _md5_hexdigest(fp):
md5_digest = md5()
while True:
block = fp.read(1024 * 16) # 16k blocks
if not block:
break
md5_digest.update(block)
return md5_digest.hexdigest()
# change this to periodically yield progress messages?
# [xx] get rid of topdir parameter -- we should be checking
# this when we build the index, anyway.
def unzip(filename, root, verbose=True):
    """
    Extract the contents of the zip file ``filename`` into the
    directory ``root``.

    Raises an Exception wrapping the first ErrorMessage yielded by
    ``_unzip_iter`` if extraction fails.
    """
    for message in _unzip_iter(filename, root, verbose):
        if isinstance(message, ErrorMessage):
            raise Exception(message)
def _unzip_iter(filename, root, verbose=True):
if verbose:
sys.stdout.write('Unzipping %s' % os.path.split(filename)[1])
sys.stdout.flush()
try:
zf = zipfile.ZipFile(filename)
except zipfile.error as e:
yield ErrorMessage(filename, 'Error with downloaded zip file')
return
except Exception as e:
yield ErrorMessage(filename, e)
return
zf.extractall(root)
if verbose:
print()
######################################################################
# Index Builder
######################################################################
# This may move to a different file sometime.
def build_index(root, base_url):
    """
    Create a new data.xml index file, by combining the xml description
    files for various packages and collections. ``root`` should be the
    path to a directory containing the package xml and zip files; and
    the collection xml files. The ``root`` directory is expected to
    have the following subdirectories::
    root/
    packages/ .................. subdirectory for packages
    corpora/ ................. zip & xml files for corpora
    grammars/ ................ zip & xml files for grammars
    taggers/ ................. zip & xml files for taggers
    tokenizers/ .............. zip & xml files for tokenizers
    etc.
    collections/ ............... xml files for collections
    For each package, there should be two files: ``package.zip``
    (where *package* is the package name)
    which contains the package itself as a compressed zip file; and
    ``package.xml``, which is an xml description of the package. The
    zipfile ``package.zip`` should expand to a single subdirectory
    named ``package/``. The base filename ``package`` must match
    the identifier given in the package's xml file.
    For each collection, there should be a single file ``collection.zip``
    describing the collection, where *collection* is the name of the collection.
    All identifiers (for both packages and collections) must be unique.
    """
    # Find all packages.
    packages = []
    for pkg_xml, zf, subdir in _find_packages(os.path.join(root, 'packages')):
        zipstat = os.stat(zf.filename)
        url = '%s/%s/%s' % (base_url, subdir, os.path.split(zf.filename)[1])
        unzipped_size = sum(zf_info.file_size for zf_info in zf.infolist())
        # Fill in several fields of the package xml with calculated values.
        pkg_xml.set('unzipped_size', '%s' % unzipped_size)
        pkg_xml.set('size', '%s' % zipstat.st_size)
        pkg_xml.set('checksum', '%s' % md5_hexdigest(zf.filename))
        pkg_xml.set('subdir', subdir)
        # pkg_xml.set('svn_revision', _svn_revision(zf.filename))
        if not pkg_xml.get('url'):
            pkg_xml.set('url', url)
        # Record the package.
        packages.append(pkg_xml)
    # Find all collections
    collections = list(_find_collections(os.path.join(root, 'collections')))
    # Check that all UIDs are unique
    uids = set()
    for item in packages + collections:
        if item.get('id') in uids:
            raise ValueError('Duplicate UID: %s' % item.get('id'))
        uids.add(item.get('id'))
    # Put it all together: <nltk_data><packages>...</packages>
    #                                 <collections>...</collections></nltk_data>
    top_elt = ElementTree.Element('nltk_data')
    top_elt.append(ElementTree.Element('packages'))
    for package in packages:
        top_elt[0].append(package)
    top_elt.append(ElementTree.Element('collections'))
    for collection in collections:
        top_elt[1].append(collection)
    _indent_xml(top_elt)
    return top_elt
def _indent_xml(xml, prefix=''):
"""
Helper for ``build_index()``: Given an XML ``ElementTree``, modify it
(and its descendents) ``text`` and ``tail`` attributes to generate
an indented tree, where each nested element is indented by 2
spaces with respect to its parent.
"""
if len(xml) > 0:
xml.text = (xml.text or '').strip() + '\n' + prefix + ' '
for child in xml:
_indent_xml(child, prefix + ' ')
for child in xml[:-1]:
child.tail = (child.tail or '').strip() + '\n' + prefix + ' '
xml[-1].tail = (xml[-1].tail or '').strip() + '\n' + prefix
def _check_package(pkg_xml, zipfilename, zf):
"""
Helper for ``build_index()``: Perform some checks to make sure that
the given package is consistent.
"""
# The filename must patch the id given in the XML file.
uid = os.path.splitext(os.path.split(zipfilename)[1])[0]
if pkg_xml.get('id') != uid:
raise ValueError(
'package identifier mismatch (%s vs %s)' % (pkg_xml.get('id'), uid)
)
# Zip file must expand to a subdir whose name matches uid.
if sum((name != uid and not name.startswith(uid + '/')) for name in zf.namelist()):
raise ValueError(
'Zipfile %s.zip does not expand to a single '
'subdirectory %s/' % (uid, uid)
)
# update for git?
def _svn_revision(filename):
    """
    Helper for ``build_index()``: Calculate the subversion revision
    number for a given file (by using ``subprocess`` to run ``svn``).

    :raises ValueError: if ``svn status`` exits non-zero, writes to
        stderr, or produces no output.
    """
    # NOTE(review): requires the ``svn`` binary on PATH; assumes the
    # third whitespace-separated token of ``svn status -v`` output is
    # the revision number -- confirm against the installed svn version.
    p = subprocess.Popen(
        ['svn', 'status', '-v', filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    (stdout, stderr) = p.communicate()
    if p.returncode != 0 or stderr or not stdout:
        raise ValueError(
            'Error determining svn_revision for %s: %s'
            % (os.path.split(filename)[1], textwrap.fill(stderr))
        )
    return stdout.split()[2]
def _find_collections(root):
    """
    Helper for ``build_index()``: Yield ``ElementTree.Element``
    objects, one per ``*.xml`` collection file found anywhere under
    *root* (searched recursively).
    """
    # (removed an unused local ``packages = []`` left over from a
    # copy of _find_packages)
    for dirname, subdirs, files in os.walk(root):
        for filename in files:
            if filename.endswith('.xml'):
                xmlfile = os.path.join(dirname, filename)
                yield ElementTree.parse(xmlfile).getroot()
def _find_packages(root):
    """
    Helper for ``build_index()``: Yield a list of tuples
    ``(pkg_xml, zf, subdir)``, where:
      - ``pkg_xml`` is an ``ElementTree.Element`` holding the xml for a
        package
      - ``zf`` is a ``zipfile.ZipFile`` for the package's contents.
      - ``subdir`` is the subdirectory (relative to ``root``) where
        the package was found (e.g. 'corpora' or 'grammars').

    :raises ValueError: on unreadable zip/xml files, id/filename
        mismatch, or a zipfile that does not expand to ``<uid>/``.
    """
    from nltk.corpus.reader.util import _path_from

    # Find all packages.
    packages = []  # NOTE(review): unused -- results are yielded, not collected
    for dirname, subdirs, files in os.walk(root):
        relpath = '/'.join(_path_from(root, dirname))
        for filename in files:
            if filename.endswith('.xml'):
                # each <name>.xml must sit next to a matching <name>.zip
                xmlfilename = os.path.join(dirname, filename)
                zipfilename = xmlfilename[:-4] + '.zip'
                try:
                    zf = zipfile.ZipFile(zipfilename)
                except Exception as e:
                    raise ValueError('Error reading file %r!\n%s' % (zipfilename, e))
                try:
                    pkg_xml = ElementTree.parse(xmlfilename).getroot()
                except Exception as e:
                    raise ValueError('Error reading file %r!\n%s' % (xmlfilename, e))
                # Check that the UID matches the filename
                uid = os.path.split(xmlfilename[:-4])[1]
                if pkg_xml.get('id') != uid:
                    raise ValueError(
                        'package identifier mismatch (%s '
                        'vs %s)' % (pkg_xml.get('id'), uid)
                    )
                # Check that the zipfile expands to a subdir whose
                # name matches the uid.
                if sum(
                    (name != uid and not name.startswith(uid + '/'))
                    for name in zf.namelist()
                ):
                    raise ValueError(
                        'Zipfile %s.zip does not expand to a '
                        'single subdirectory %s/' % (uid, uid)
                    )
                yield pkg_xml, zf, relpath
        # Don't recurse into svn subdirectories:
        # (mutating ``subdirs`` in-place prunes os.walk's traversal)
        try:
            subdirs.remove('.svn')
        except ValueError:
            pass
######################################################################
# Main:
######################################################################
# There should be a command-line interface
# Aliases
# module-level singleton Downloader shared by the helper entry points below
_downloader = Downloader()
download = _downloader.download


def download_shell():
    """Run the interactive text-mode downloader."""
    DownloaderShell(_downloader).run()


def download_gui():
    """Run the Tkinter downloader GUI."""
    DownloaderGUI(_downloader).mainloop()


def update():
    """Re-download any corpora/models that are out of date."""
    _downloader.update()
if __name__ == '__main__':
    from optparse import OptionParser

    # Minimal command-line interface: download the package ids given as
    # positional arguments, or open the interactive downloader when none
    # are given.
    parser = OptionParser()
    parser.add_option(
        "-d",
        "--dir",
        dest="dir",
        help="download package to directory DIR",
        metavar="DIR",
    )
    parser.add_option(
        "-q",
        "--quiet",
        dest="quiet",
        action="store_true",
        default=False,
        help="work quietly",
    )
    parser.add_option(
        "-f",
        "--force",
        dest="force",
        action="store_true",
        default=False,
        help="download even if already installed",
    )
    parser.add_option(
        "-e",
        "--exit-on-error",
        dest="halt_on_error",
        action="store_true",
        default=False,
        help="exit if an error occurs",
    )
    parser.add_option(
        "-u",
        "--url",
        dest="server_index_url",
        # environment variable overrides the built-in default index URL
        default=os.environ.get('NLTK_DOWNLOAD_URL'),
        help="download server index url",
    )
    (options, args) = parser.parse_args()
    downloader = Downloader(server_index_url=options.server_index_url)
    if args:
        for pkg_id in args:
            rv = downloader.download(
                info_or_id=pkg_id,
                download_dir=options.dir,
                quiet=options.quiet,
                force=options.force,
                halt_on_error=options.halt_on_error,
            )
            # NOTE(review): '== False' (not 'is False') also matches 0;
            # presumably download() returns a bool -- confirm.
            if rv == False and options.halt_on_error:
                break
    else:
        # no package ids: open the interactive downloader UI
        downloader.download(
            download_dir=options.dir,
            quiet=options.quiet,
            force=options.force,
            halt_on_error=options.halt_on_error,
        )
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_994_0 |
crossvul-python_data_bad_1777_0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""This class provides the api to talk to the client.
It will then call the cherrymodel, to get the
requested information"""
import os # shouldn't have to list any folder in the future!
import json
import cherrypy
import codecs
import sys
try:
from urllib.parse import unquote
except ImportError:
from backport.urllib.parse import unquote
try:
from urllib import parse
except ImportError:
from backport.urllib import parse
import audiotranscode
from tinytag import TinyTag
from cherrymusicserver import userdb
from cherrymusicserver import log
from cherrymusicserver import albumartfetcher
from cherrymusicserver import service
from cherrymusicserver.pathprovider import readRes
from cherrymusicserver.pathprovider import albumArtFilePath
import cherrymusicserver as cherry
import cherrymusicserver.metainfo as metainfo
from cherrymusicserver.util import Performance, MemoryZipFile
from cherrymusicserver.ext import zipstream
import time
debug = True
@service.user(model='cherrymodel', playlistdb='playlist',
useroptions='useroptions', userdb='users')
class HTTPHandler(object):
def __init__(self, config):
    """Wire up the JSON API: pre-render the login / first-run / main
    page templates and build the action-name -> handler dispatch table
    used by ``api()``."""
    self.config = config
    template_main = 'res/dist/main.html'
    template_login = 'res/login.html'
    template_firstrun = 'res/firstrun.html'
    self.mainpage = readRes(template_main)
    self.loginpage = readRes(template_login)
    self.firstrunpage = readRes(template_firstrun)
    # keys are the action names clients send to /api/<action>
    self.handlers = {
        'search': self.api_search,
        'rememberplaylist': self.api_rememberplaylist,
        'saveplaylist': self.api_saveplaylist,
        'loadplaylist': self.api_loadplaylist,
        'generaterandomplaylist': self.api_generaterandomplaylist,
        'deleteplaylist': self.api_deleteplaylist,
        'getmotd': self.api_getmotd,
        'restoreplaylist': self.api_restoreplaylist,
        'getplayables': self.api_getplayables,
        'getuserlist': self.api_getuserlist,
        'adduser': self.api_adduser,
        'userdelete': self.api_userdelete,
        'userchangepassword': self.api_userchangepassword,
        'showplaylists': self.api_showplaylists,
        'logout': self.api_logout,
        'downloadpls': self.api_downloadpls,
        'downloadm3u': self.api_downloadm3u,
        'getsonginfo': self.api_getsonginfo,
        'getencoders': self.api_getencoders,
        'getdecoders': self.api_getdecoders,
        'transcodingenabled': self.api_transcodingenabled,
        'updatedb': self.api_updatedb,
        'getconfiguration': self.api_getconfiguration,
        'compactlistdir': self.api_compactlistdir,
        'listdir': self.api_listdir,
        'fetchalbumart': self.api_fetchalbumart,
        'fetchalbumarturls': self.api_fetchalbumarturls,
        'albumart_set': self.api_albumart_set,
        'heartbeat': self.api_heartbeat,
        'getuseroptions': self.api_getuseroptions,
        'setuseroption': self.api_setuseroption,
        'changeplaylist': self.api_changeplaylist,
        'downloadcheck': self.api_downloadcheck,
        'setuseroptionfor': self.api_setuseroptionfor,
    }
def issecure(self, url):
    """Return True when *url* uses the https scheme."""
    scheme = parse.urlparse(url).scheme
    return scheme == 'https'
def getBaseUrl(self, redirect_unencrypted=False):
    """Return the server's base URL (scheme://host:port).

    When SSL is enabled but the current request is unencrypted, the
    https URL (with the configured SSL port) is returned instead; with
    ``redirect_unencrypted`` set, the client is redirected there
    immediately via ``cherrypy.HTTPRedirect``.
    """
    ipAndPort = parse.urlparse(cherrypy.url()).netloc
    is_secure_connection = self.issecure(cherrypy.url())
    ssl_enabled = cherry.config['server.ssl_enabled']
    if ssl_enabled and not is_secure_connection:
        log.d(_('Not secure, redirecting...'))
        # strip the ':port' suffix so we can append the SSL port
        ip = ipAndPort[:ipAndPort.rindex(':')]
        url = 'https://' + ip + ':' + str(cherry.config['server.ssl_port'])
        if redirect_unencrypted:
            raise cherrypy.HTTPRedirect(url, 302)
    else:
        url = 'http://' + ipAndPort
    return url
def index(self, *args, **kwargs):
    """Serve the main page, the login page, or the first-run page.

    Also handles the login form post ('login' in kwargs) and the
    initial admin-account creation on a fresh install.
    """
    self.getBaseUrl(redirect_unencrypted=True)
    # first run == no user accounts exist yet
    firstrun = 0 == self.userdb.getUserCount()
    show_page = self.mainpage  # generated main.html from devel.html
    if 'devel' in kwargs:
        # reload pages everytime in devel mode
        show_page = readRes('res/devel.html')
        self.loginpage = readRes('res/login.html')
        self.firstrunpage = readRes('res/firstrun.html')
    if 'login' in kwargs:
        username = kwargs.get('username', '')
        password = kwargs.get('password', '')
        login_action = kwargs.get('login', '')
        if login_action == 'login':
            self.session_auth(username, password)
            if cherrypy.session['username']:
                username = cherrypy.session['username']
                log.i(_('user {name} just logged in.').format(name=username))
        elif login_action == 'create admin user':
            # only honored on a fresh install; otherwise refuse
            if firstrun:
                if username.strip() and password.strip():
                    self.userdb.addUser(username, password, True)
                    self.session_auth(username, password)
                    return show_page
            else:
                return "No, you can't."
    if firstrun:
        return self.firstrunpage
    else:
        if self.isAuthorized():
            return show_page
        else:
            return self.loginpage
index.exposed = True
def isAuthorized(self):
    """Return True when the current session belongs to a valid user.

    Starts an automatic session for the primary user (id 1) when
    localhost auto-login is enabled; logs out stale sessions whose
    stored username no longer matches the stored user id.
    """
    try:
        sessionUsername = cherrypy.session.get('username', None)
        sessionUserId = cherrypy.session.get('userid', -1)
        nameById = self.userdb.getNameById(sessionUserId)
    except (UnicodeDecodeError, ValueError) as e:
        # workaround for python2/python3 jump, filed bug in cherrypy
        # https://bitbucket.org/cherrypy/cherrypy/issue/1216/sessions-python2-3-compability-unsupported
        log.w(_('''
            Dropping all sessions! Try not to change between python 2 and 3,
            everybody has to relogin now.'''))
        cherrypy.session.delete()
        sessionUsername = None
    if sessionUsername is None:
        if self.autoLoginActive():
            cherrypy.session['username'] = self.userdb.getNameById(1)
            cherrypy.session['userid'] = 1
            cherrypy.session['admin'] = True
            return True
        else:
            return False
    elif sessionUsername != nameById:
        # NOTE(review): api_logout() takes no 'value' parameter, so this
        # call raises TypeError when reached -- confirm/fix signature.
        self.api_logout(value=None)
        return False
    return True
def autoLoginActive(self):
    """Return True when requests from localhost should be logged in
    automatically (config switch 'server.localhost_auto_login')."""
    is_loopback = cherrypy.request.remote.ip in ('127.0.0.1', '::1')
    if is_loopback and cherry.config['server.localhost_auto_login']:
        return True
    return False


def session_auth(self, username, password):
    """Authenticate *username*/*password* and store the resulting user
    in the cherrypy session (the 'nobody' user on failure).

    Remote admin logins are rejected unless explicitly permitted by
    'server.permit_remote_admin_login'.
    """
    user = self.userdb.auth(username, password)
    allow_remote = cherry.config['server.permit_remote_admin_login']
    is_loopback = cherrypy.request.remote.ip in ('127.0.0.1', '::1')
    if not is_loopback and user.isadmin and not allow_remote:
        log.i(_('Rejected remote admin login from user: {name}').format(name=user.name))
        user = userdb.User.nobody()
    cherrypy.session['username'] = user.name
    cherrypy.session['userid'] = user.uid
    cherrypy.session['admin'] = user.isadmin


def getUserId(self):
    """Return the user id stored in the session, or '' (after expiring
    the session) when no user is logged in."""
    try:
        return cherrypy.session['userid']
    except KeyError:
        cherrypy.lib.sessions.expire()
        # NOTE(review): this HTTPRedirect is constructed but never
        # raised, so no redirect actually happens -- confirm intent.
        cherrypy.HTTPRedirect(cherrypy.url(), 302)
        return ''
def trans(self, newformat, *path, **params):
    ''' Transcodes the track given as ``path`` into ``newformat``.

        Streams the response of the corresponding
        ``audiotranscode.AudioTranscode().transcodeStream()`` call.

        params:
            bitrate: int for kbps. None or < 1 for default
    '''
    if not self.isAuthorized():
        raise cherrypy.HTTPRedirect(self.getBaseUrl(), 302)
    # release the session lock so the (long-running) stream does not
    # block other requests from the same session
    cherrypy.session.release_lock()
    if cherry.config['media.transcode'] and path:
        # bitrate
        bitrate = params.pop('bitrate', None) or None  # catch empty strings
        if bitrate:
            try:
                bitrate = max(0, int(bitrate)) or None  # None if < 1
            except (TypeError, ValueError):
                raise cherrypy.HTTPError(400, "Bad query: "
                    "bitrate ({0!r}) must be an integer".format(str(bitrate)))
        # path: the URL path segments form the file path relative to basedir
        path = os.path.sep.join(path)
        if sys.version_info < (3, 0):  # workaround for #327 (cherrypy issue)
            path = path.decode('utf-8')  # make it work with non-ascii
        else:
            path = codecs.decode(codecs.encode(path, 'latin1'), 'utf-8')
        fullpath = os.path.join(cherry.config['media.basedir'], path)
        starttime = int(params.pop('starttime', 0))
        transcoder = audiotranscode.AudioTranscode()
        mimetype = audiotranscode.mime_type(newformat)
        cherrypy.response.headers["Content-Type"] = mimetype
        try:
            return transcoder.transcode_stream(fullpath, newformat,
                                               bitrate=bitrate, starttime=starttime)
        except (audiotranscode.TranscodeError, IOError) as e:
            raise cherrypy.HTTPError(404, e.value)
trans.exposed = True
trans._cp_config = {'response.stream': True}
def api(self, *args, **kwargs):
    """calls the appropriate handler from the handlers
    dict, if available. handlers having noauth set to
    true do not need authentification to work.
    """
    # check action
    action = args[0] if args else ''
    if not action in self.handlers:
        return "Error: no such action. '%s'" % action
    # authorize if not explicitly deactivated
    handler = self.handlers[action]
    needsAuth = not ('noauth' in dir(handler) and handler.noauth)
    if needsAuth and not self.isAuthorized():
        raise cherrypy.HTTPError(401, 'Unauthorized')
    handler_args = {}
    if 'data' in kwargs:
        # handler keyword arguments arrive JSON-encoded in 'data'
        handler_args = json.loads(kwargs['data'])
    is_binary = ('binary' in dir(handler) and handler.binary)
    if is_binary:
        # binary handlers set their own response headers and payload
        return handler(**handler_args)
    else:
        return json.dumps({'data': handler(**handler_args)})
api.exposed = True
def download_check_files(self, filelist):
    """Validate a client-supplied list of file paths for download.

    Returns 'ok' when the download may proceed, otherwise one of the
    status strings 'not_permitted', 'invalid_file' or 'too_big' (or an
    OS error message from the size check).
    """
    # only admins and allowed users may download
    if not cherrypy.session['admin']:
        uo = self.useroptions.forUser(self.getUserId())
        if not uo.getOptionValue('media.may_download'):
            return 'not_permitted'
    # make sure nobody tries to escape from basedir (CWE-22):
    # the old check for the literal substring '/../' missed absolute
    # paths and parent references at the start/end ('../x', 'x/..'),
    # so normalize each path and reject anything that still points
    # outside the base directory.
    for f in filelist:
        if os.path.isabs(f):
            return 'invalid_file'
        norm = os.path.normpath(f)
        if norm == '..' or norm.startswith('..' + os.sep) or norm.startswith('../'):
            return 'invalid_file'
    # make sure all files are smaller than maximum download size
    size_limit = cherry.config['media.maximum_download_size']
    try:
        if self.model.file_size_within_limit(filelist, size_limit):
            return 'ok'
        else:
            return 'too_big'
    except OSError as e:  # use OSError for python2 compatibility
        return str(e)
def api_downloadcheck(self, filelist):
    """Translate download_check_files() status codes into user-facing
    messages; 'ok' passes through unchanged."""
    status = self.download_check_files(filelist)
    if status == 'not_permitted':
        return """You are not allowed to download files."""
    elif status == 'invalid_file':
        return "Error: invalid filename found in {list}".format(list=filelist)
    elif status == 'too_big':
        size_limit = cherry.config['media.maximum_download_size']
        return """Can't download: Playlist is bigger than {maxsize} mB.
        The server administrator can change this configuration.
        """.format(maxsize=size_limit/1024/1024)
    elif status == 'ok':
        return status
    else:
        # any other string is an OS error message from the size check
        message = "Error status check for download: {status!r}".format(status=status)
        log.e(message)
        return message
def download(self, value):
    """Stream the requested files as a zip archive.

    :param value: URL-quoted JSON list of file paths relative to the
        media basedir; validated by download_check_files() first.
    """
    if not self.isAuthorized():
        raise cherrypy.HTTPError(401, 'Unauthorized')
    filelist = [filepath for filepath in json.loads(unquote(value))]
    dlstatus = self.download_check_files(filelist)
    if dlstatus == 'ok':
        # release the session lock before the long-running stream
        _save_and_release_session()
        zipmime = 'application/x-zip-compressed'
        cherrypy.response.headers["Content-Type"] = zipmime
        zipname = 'attachment; filename="music.zip"'
        cherrypy.response.headers['Content-Disposition'] = zipname
        basedir = cherry.config['media.basedir']
        fullpath_filelist = [os.path.join(basedir, f) for f in filelist]
        return zipstream.ZipStream(fullpath_filelist)
    else:
        # a human-readable failure status string
        return dlstatus
download.exposed = True
download._cp_config = {'response.stream': True}
def api_getuseroptions(self):
    """Return the current user's changeable options; 'may_download' is
    forced True for admins."""
    uo = self.useroptions.forUser(self.getUserId())
    uco = uo.getChangableOptions()
    if cherrypy.session['admin']:
        uco['media'].update({'may_download': True})
    else:
        uco['media'].update({'may_download': uo.getOptionValue('media.may_download')})
    return uco


def api_heartbeat(self):
    """Record that the current user is online right now."""
    uo = self.useroptions.forUser(self.getUserId())
    uo.setOption('last_time_online', int(time.time()))


def api_setuseroption(self, optionkey, optionval):
    """Set a single option for the current user."""
    uo = self.useroptions.forUser(self.getUserId())
    uo.setOption(optionkey, optionval)
    return "success"


def api_setuseroptionfor(self, userid, optionkey, optionval):
    """Set a single option for another user (admins only)."""
    if cherrypy.session['admin']:
        uo = self.useroptions.forUser(userid)
        uo.setOption(optionkey, optionval)
        return "success"
    else:
        return "error: not permitted. Only admins can change other users options"
def api_fetchalbumarturls(self, searchterm):
    """Search online for album-art image URLs (admins only); returns
    at most 10 URLs."""
    if not cherrypy.session['admin']:
        raise cherrypy.HTTPError(401, 'Unauthorized')
    _save_and_release_session()
    fetcher = albumartfetcher.AlbumArtFetcher()
    imgurls = fetcher.fetchurls(searchterm)
    # show no more than 10 images
    return imgurls[:min(len(imgurls), 10)]


def api_albumart_set(self, directory, imageurl):
    """Download *imageurl* and cache it as the album art for
    *directory* (admins only)."""
    if not cherrypy.session['admin']:
        raise cherrypy.HTTPError(401, 'Unauthorized')
    b64imgpath = albumArtFilePath(directory)
    fetcher = albumartfetcher.AlbumArtFetcher()
    data, header = fetcher.retrieveData(imageurl)
    self.albumartcache_save(b64imgpath, data)
def api_fetchalbumart(self, directory):
    """Serve album art for *directory*.

    Tries, in order: the file's embedded ID3 image (when *directory*
    is actually a file), the local art cache, an image file inside the
    folder, and finally an online fetch (when enabled). Falls back to
    redirecting to the default folder icon.
    """
    _save_and_release_session()
    default_folder_image = "../res/img/folder.png"
    log.i('Fetching album art for: %s' % directory)
    filepath = os.path.join(cherry.config['media.basedir'], directory)
    if os.path.isfile(filepath):
        # if the given path is a file, try to get the image from ID3
        tag = TinyTag.get(filepath, image=True)
        image_data = tag.get_image()
        if image_data:
            log.d('Image found in tag.')
            header = {'Content-Type': 'image/jpg', 'Content-Length': len(image_data)}
            cherrypy.response.headers.update(header)
            return image_data
        else:
            # if the file does not contain an image, display the image of the
            # parent directory
            directory = os.path.dirname(directory)
    # try getting a cached album art image
    b64imgpath = albumArtFilePath(directory)
    img_data = self.albumartcache_load(b64imgpath)
    if img_data:
        cherrypy.response.headers["Content-Length"] = len(img_data)
        return img_data
    # try getting album art inside local folder
    fetcher = albumartfetcher.AlbumArtFetcher()
    localpath = os.path.join(cherry.config['media.basedir'], directory)
    header, data, resized = fetcher.fetchLocal(localpath)
    if header:
        if resized:
            # cache resized image for next time
            self.albumartcache_save(b64imgpath, data)
        cherrypy.response.headers.update(header)
        return data
    elif cherry.config['media.fetch_album_art']:
        # fetch album art from online source
        try:
            foldername = os.path.basename(directory)
            keywords = foldername
            log.i(_("Fetching album art for keywords {keywords!r}").format(keywords=keywords))
            header, data = fetcher.fetch(keywords)
            if header:
                cherrypy.response.headers.update(header)
                self.albumartcache_save(b64imgpath, data)
                return data
            else:
                # albumart fetcher failed, so we serve a standard image
                raise cherrypy.HTTPRedirect(default_folder_image, 302)
        except:
            # albumart fetcher threw exception, so we serve a standard image
            # NOTE(review): bare except also catches the HTTPRedirect raised
            # just above (re-raising an identical one) and hides real
            # errors -- consider narrowing; confirm before changing.
            raise cherrypy.HTTPRedirect(default_folder_image, 302)
    else:
        # no local album art found, online fetching deactivated, show default
        raise cherrypy.HTTPRedirect(default_folder_image, 302)
api_fetchalbumart.noauth = True
api_fetchalbumart.binary = True
def albumartcache_load(self, imgb64path):
    """Return the cached album-art bytes stored at *imgb64path*, or
    None when no cache file exists."""
    if not os.path.exists(imgb64path):
        return None
    with open(imgb64path, 'rb') as cached:
        return cached.read()


def albumartcache_save(self, path, data):
    """Write album-art bytes *data* to the cache file at *path*."""
    with open(path, 'wb') as cached:
        cached.write(data)
def api_compactlistdir(self, directory, filterstr=None):
    """List *directory* (optionally filtered by *filterstr*) as entry
    dicts; 400 on an invalid directory."""
    try:
        files_to_list = self.model.listdir(directory, filterstr)
    except ValueError:
        raise cherrypy.HTTPError(400, 'Bad Request')
    return [entry.to_dict() for entry in files_to_list]


def api_listdir(self, directory):
    """List *directory* as entry dicts; 400 on an invalid directory."""
    try:
        return [entry.to_dict() for entry in self.model.listdir(directory)]
    except ValueError:
        raise cherrypy.HTTPError(400, 'Bad Request')


def api_search(self, searchstring):
    """Search the media library for *searchstring*.

    NOTE(review): returns the string '[]' for a blank query but a list
    of dicts otherwise -- inconsistent types; confirm clients cope.
    """
    if not searchstring.strip():
        jsonresults = '[]'
    else:
        with Performance(_('processing whole search request')):
            searchresults = self.model.search(searchstring.strip())
        with Performance(_('rendering search results as json')):
            jsonresults = [entry.to_dict() for entry in searchresults]
    return jsonresults
def api_rememberplaylist(self, playlist):
    """Stash *playlist* in the session (see api_restoreplaylist)."""
    cherrypy.session['playlist'] = playlist


def api_saveplaylist(self, playlist, public, playlistname, overwrite=False):
    """Persist *playlist* under *playlistname* for the current user;
    400 with the backend's message on failure."""
    res = self.playlistdb.savePlaylist(
        userid=self.getUserId(),
        public=1 if public else 0,
        playlist=playlist,
        playlisttitle=playlistname,
        overwrite=overwrite)
    if res == "success":
        return res
    else:
        raise cherrypy.HTTPError(400, res)


def api_deleteplaylist(self, playlistid):
    """Delete a playlist owned by the current user."""
    res = self.playlistdb.deletePlaylist(playlistid,
                                         self.getUserId(),
                                         override_owner=False)
    if res == "success":
        return res
    else:
        # not the ideal status code but we don't know the actual
        # cause without parsing res
        raise cherrypy.HTTPError(400, res)


def api_loadplaylist(self, playlistid):
    """Return the entries of the given playlist as dicts."""
    return [entry.to_dict() for entry in self.playlistdb.loadPlaylist(
        playlistid=playlistid,
        userid=self.getUserId()
    )]


def api_generaterandomplaylist(self):
    """Return 50 random tracks from the library as entry dicts."""
    return [entry.to_dict() for entry in self.model.randomMusicEntries(50)]


def api_changeplaylist(self, plid, attribute, value):
    """Change a playlist attribute; only 'public' (bool) is supported.
    Returns None (silently) for anything else or invalid types."""
    if attribute == 'public':
        is_valid = type(value) == bool and type(plid) == int
        if is_valid:
            return self.playlistdb.setPublic(userid=self.getUserId(),
                                             plid=plid,
                                             public=value)
def api_getmotd(self):
    """Return the 'message of the day'.

    For admins with update notification enabled, returns a summary of
    pending updates (version, features, fixes); otherwise a random
    wisdom string from the model.
    """
    if cherrypy.session['admin'] and cherry.config['general.update_notification']:
        _save_and_release_session()
        new_versions = self.model.check_for_updates()
        if new_versions:
            newest_version = new_versions[0]['version']
            features = []
            fixes = []
            for version in new_versions:
                # changelog entries are prefixed strings; sort them into
                # feature vs. fix buckets
                for update in version['features']:
                    if update.startswith('FEATURE:'):
                        features.append(update[len('FEATURE:'):])
                    elif update.startswith('FIX:'):
                        fixes.append(update[len('FIX:'):])
                    elif update.startswith('FIXED:'):
                        fixes.append(update[len('FIXED:'):])
            retdata = {'type': 'update', 'data': {}}
            retdata['data']['version'] = newest_version
            retdata['data']['features'] = features
            retdata['data']['fixes'] = fixes
            return retdata
    return {'type': 'wisdom', 'data': self.model.motd()}
def api_restoreplaylist(self):
    """Return the playlist remembered in the session (see
    api_rememberplaylist); empty list when none was stored."""
    session_playlist = cherrypy.session.get('playlist', [])
    return session_playlist


def api_getplayables(self):
    """DEPRECATED"""
    return json.dumps(cherry.config['media.playable'])
def api_getuserlist(self):
    """Return all users (JSON) with last-online time and download
    permission, sorted most-recently-online first. Admins only;
    everyone else gets an empty list with time 0."""
    if cherrypy.session['admin']:
        userlist = self.userdb.getUserList()
        for user in userlist:
            # an admin must not delete their own account
            if user['id'] == cherrypy.session['userid']:
                user['deletable'] = False
            user_options = self.useroptions.forUser(user['id'])
            t = user_options.getOptionValue('last_time_online')
            may_download = user_options.getOptionValue('media.may_download')
            user['last_time_online'] = t
            user['may_download'] = may_download
        sortfunc = lambda user: user['last_time_online']
        userlist = sorted(userlist, key=sortfunc, reverse=True)
        return json.dumps({'time': int(time.time()),
                           'userlist': userlist})
    else:
        return json.dumps({'time': 0, 'userlist': []})
def api_adduser(self, username, password, isadmin):
    """Create a new user account (admins only).

    Returns a human-readable status string.
    """
    if not cherrypy.session['admin']:
        return "You didn't think that would work, did you?"
    if self.userdb.addUser(username, password, isadmin):
        return 'added new user: %s' % username
    # bugfix: the original did "'error, cannot add new user!' % username"
    # -- %-formatting against a string with no placeholder raises
    # TypeError at runtime instead of reporting the failure
    return 'error, cannot add new user: %s' % username
def api_userchangepassword(self, oldpassword, newpassword, username=''):
    """Change a password after re-authenticating with *oldpassword*.

    An empty *username* means the logged-in user; admins may change
    other users' passwords. Raises 403 on failed re-authentication or
    missing permission.
    """
    isself = username == ''
    if isself:
        username = cherrypy.session['username']
    authed_user = self.userdb.auth(username, oldpassword)
    is_authenticated = userdb.User.nobody() != authed_user
    if not is_authenticated:
        raise cherrypy.HTTPError(403, "Forbidden")
    if isself or cherrypy.session['admin']:
        return self.userdb.changePassword(username, newpassword)
    else:
        raise cherrypy.HTTPError(403, "Forbidden")


def api_userdelete(self, userid):
    """Delete a user account (admins only; self-deletion refused)."""
    is_self = cherrypy.session['userid'] == userid
    if cherrypy.session['admin'] and not is_self:
        deleted = self.userdb.deleteUser(userid)
        return 'success' if deleted else 'failed'
    else:
        return "You didn't think that would work, did you?"
def api_showplaylists(self, sortby="created", filterby=''):
    """Return the playlists visible to the current user.

    :param sortby: sort key; a '-' prefix reverses the order, unknown
        keys fall back to creation time, 'default' means age ascending.
    :param filterby: passed through to the playlist database filter.
    """
    playlists = self.playlistdb.showPlaylists(self.getUserId(), filterby)
    curr_time = int(time.time())
    is_reverse = False
    # translate userids to usernames:
    for pl in playlists:
        pl['username'] = self.userdb.getNameById(pl['userid'])
        pl['type'] = 'playlist'
        pl['age'] = curr_time - pl['created']
    if sortby[0] == '-':
        is_reverse = True
        sortby = sortby[1:]
    if not sortby in ('username', 'age', 'title', 'default'):
        sortby = 'created'
    if sortby == 'default':
        sortby = 'age'
        is_reverse = False
    playlists = sorted(playlists, key=lambda x: x[sortby], reverse = is_reverse)
    return playlists
def api_logout(self, value=None):
    """Expire the current session.

    :param value: ignored; accepted because ``isAuthorized()`` calls
        ``self.api_logout(value=None)``, which raised TypeError against
        the old zero-argument signature.
    """
    cherrypy.lib.sessions.expire()
# bugfix: the dispatcher in api() looks for an attribute named
# 'noauth'; the old 'no_auth' spelling was never seen by that check,
# so logging out required a valid session.
api_logout.noauth = True
def api_downloadpls(self, plid, hostaddr):
    """Serve playlist *plid* as a downloadable .pls file; *hostaddr*
    is embedded in the track URLs."""
    userid = self.getUserId()
    pls = self.playlistdb.createPLS(plid=plid, userid=userid, addrstr=hostaddr)
    name = self.playlistdb.getName(plid, userid)
    if pls and name:
        return self.serve_string_as_file(pls, name+'.pls')
api_downloadpls.binary = True


def api_downloadm3u(self, plid, hostaddr):
    """Serve playlist *plid* as a downloadable .m3u file; *hostaddr*
    is embedded in the track URLs."""
    userid = self.getUserId()
    pls = self.playlistdb.createM3U(plid=plid, userid=userid, addrstr=hostaddr)
    name = self.playlistdb.getName(plid, userid)
    if pls and name:
        return self.serve_string_as_file(pls, name+'.m3u')
api_downloadm3u.binary = True
def export_playlists(self, format, all=False, hostaddr=''):
    """Serve the user's playlists as a zip of .m3u or .pls files.

    :param format: 'm3u' or 'pls' (case-insensitive).
    :param all: also include other users' public playlists.
    :param hostaddr: address prefix embedded in the playlist entries.
    :raises cherrypy.HTTPError: 401 when not logged in, 400 on an
        unknown format, 404 when no playlists exist.
    """
    userid = self.getUserId()
    if not userid:
        raise cherrypy.HTTPError(401, _("Please log in"))
    hostaddr = (hostaddr.strip().rstrip('/') + cherry.config['server.rootpath']).rstrip('/')
    format = format.lower()
    if format == 'm3u':
        filemaker = self.playlistdb.createM3U
    elif format == 'pls':
        filemaker = self.playlistdb.createPLS
    else:
        raise cherrypy.HTTPError(400,
            _('Unknown playlist format: {format!r}').format(format=format))
    playlists = self.playlistdb.showPlaylists(userid, include_public=all)
    if not playlists:
        raise cherrypy.HTTPError(404, _('No playlists found'))
    with MemoryZipFile() as zip:
        for pl in playlists:
            plid = pl['plid']
            plstr = filemaker(plid=plid, userid=userid, addrstr=hostaddr)
            name = self.playlistdb.getName(plid, userid) + '.' + format
            if not pl['owner']:
                # other users' playlists go into a per-owner subfolder
                username = self.userdb.getNameById(pl['userid'])
                name = username + '/' + name
            zip.writestr(name, plstr)
    zipmime = 'application/x-zip-compressed'
    zipname = 'attachment; filename="playlists.zip"'
    cherrypy.response.headers["Content-Type"] = zipmime
    cherrypy.response.headers['Content-Disposition'] = zipname
    return zip.getbytes()
export_playlists.exposed = True
def api_getsonginfo(self, path):
    """Return tag metadata for the track at *path* (relative to the
    media basedir) as a JSON string."""
    basedir = cherry.config['media.basedir']
    # NOTE(review): *path* is joined without traversal checks -- relies
    # on the caller being authenticated; confirm handling upstream.
    abspath = os.path.join(basedir, path)
    return json.dumps(metainfo.getSongInfo(abspath).dict())


def api_getencoders(self):
    """Return the available transcoder output formats (JSON)."""
    return json.dumps(audiotranscode.getEncoders())


def api_getdecoders(self):
    """Return the available transcoder input formats (JSON)."""
    return json.dumps(audiotranscode.getDecoders())


def api_transcodingenabled(self):
    """Return whether live transcoding is enabled (JSON bool)."""
    return json.dumps(cherry.config['media.transcode'])


def api_updatedb(self):
    """Trigger a rescan of the media library."""
    self.model.updateLibrary()
    return 'success'
def api_getconfiguration(self):
    """Return the client-relevant configuration: transcoding state and
    formats, session user info, serve paths and server version."""
    clientconfigkeys = {
        'transcodingenabled': cherry.config['media.transcode'],
        'fetchalbumart': cherry.config['media.fetch_album_art'],
        'isadmin': cherrypy.session['admin'],
        'username': cherrypy.session['username'],
        'servepath': 'serve/',
        'transcodepath': 'trans/',
        'auto_login': self.autoLoginActive(),
        'version': cherry.REPO_VERSION or cherry.VERSION,
    }
    if cherry.config['media.transcode']:
        decoders = list(self.model.transcoder.available_decoder_formats())
        clientconfigkeys['getdecoders'] = decoders
        encoders = list(self.model.transcoder.available_encoder_formats())
        clientconfigkeys['getencoders'] = encoders
    else:
        # transcoding disabled: report no formats at all
        clientconfigkeys['getdecoders'] = []
        clientconfigkeys['getencoders'] = []
    return clientconfigkeys
def serve_string_as_file(self, string, filename):
    """Return *string* UTF-8 encoded, with response headers set so the
    browser saves it as an attachment named *filename*."""
    headers = cherrypy.response.headers
    headers["Content-Type"] = "application/x-download"
    headers["Content-Disposition"] = 'attachment; filename="' + filename + '"'
    return codecs.encode(string, "UTF-8")
def _save_and_release_session():
    """ workaround to cleanly release FileSessions in Cherrypy >= 3.3
    From https://github.com/devsnd/cherrymusic/issues/483:
    > CherryPy >=3.3.0 (up to current version, 3.6) makes it impossible to
    > explicitly release FileSession locks, because:
    > 1. FileSession.save() asserts that the session is locked; and
    > 2. _cptools.SessionTool always adds a hook to call sessions.save
    > before the response is finalized.
    > If we still want to release the session in a controller, I guess the
    > best way to work around this is to remove the hook before the
    > controller returns:
    """
    cherrypy.session.save()
    # drop the framework's save hook so the lock is not re-acquired
    # when the response is finalized
    hooks = cherrypy.serving.request.hooks['before_finalize']
    forbidden = cherrypy.lib.sessions.save
    hooks[:] = [h for h in hooks if h.callback is not forbidden]
    # there's likely only one hook, since a 2nd call to save would always fail;
    # but let's be safe, and block all calls to save :)
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_1777_0 |
crossvul-python_data_bad_994_0 | # Natural Language Toolkit: Corpus & Model Downloader
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
The NLTK corpus and module downloader. This module defines several
interfaces which can be used to download corpora, models, and other
data packages that can be used with NLTK.
Downloading Packages
====================
If called with no arguments, ``download()`` will display an interactive
interface which can be used to download and install new packages.
If Tkinter is available, then a graphical interface will be shown,
otherwise a simple text interface will be provided.
Individual packages can be downloaded by calling the ``download()``
function with a single argument, giving the package identifier for the
package that should be downloaded:
>>> download('treebank') # doctest: +SKIP
[nltk_data] Downloading package 'treebank'...
[nltk_data] Unzipping corpora/treebank.zip.
NLTK also provides a number of \"package collections\", consisting of
a group of related packages. To download all packages in a
collection, simply call ``download()`` with the collection's
identifier:
>>> download('all-corpora') # doctest: +SKIP
[nltk_data] Downloading package 'abc'...
[nltk_data] Unzipping corpora/abc.zip.
[nltk_data] Downloading package 'alpino'...
[nltk_data] Unzipping corpora/alpino.zip.
...
[nltk_data] Downloading package 'words'...
[nltk_data] Unzipping corpora/words.zip.
Download Directory
==================
By default, packages are installed in either a system-wide directory
(if Python has sufficient access to write to it); or in the current
user's home directory. However, the ``download_dir`` argument may be
used to specify a different installation target, if desired.
See ``Downloader.default_download_dir()`` for a more detailed
description of how the default download directory is chosen.
NLTK Download Server
====================
Before downloading any packages, the corpus and module downloader
contacts the NLTK download server, to retrieve an index file
describing the available packages. By default, this index file is
loaded from ``https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml``.
If necessary, it is possible to create a new ``Downloader`` object,
specifying a different URL for the package index file.
Usage::
python nltk/downloader.py [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
or::
python -m nltk.downloader [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
"""
# ----------------------------------------------------------------------
from __future__ import print_function, division, unicode_literals
"""
0 1 2 3
[label][----][label][----]
[column ][column ]
Notes
=====
Handling data files.. Some questions:
* Should the data files be kept zipped or unzipped? I say zipped.
* Should the data files be kept in svn at all? Advantages: history;
automatic version numbers; 'svn up' could be used rather than the
downloader to update the corpora. Disadvantages: they're big,
which makes working from svn a bit of a pain. And we're planning
to potentially make them much bigger. I don't think we want
people to have to download 400MB corpora just to use nltk from svn.
* Compromise: keep the data files in trunk/data rather than in
trunk/nltk. That way you can check them out in svn if you want
to; but you don't need to, and you can use the downloader instead.
* Also: keep models in mind. When we change the code, we'd
potentially like the models to get updated. This could require a
little thought.
* So.. let's assume we have a trunk/data directory, containing a bunch
of packages. The packages should be kept as zip files, because we
really shouldn't be editing them much (well -- we may edit models
more, but they tend to be binary-ish files anyway, where diffs
aren't that helpful). So we'll have trunk/data, with a bunch of
files like abc.zip and treebank.zip and propbank.zip. For each
package we could also have eg treebank.xml and propbank.xml,
describing the contents of the package (name, copyright, license,
etc). Collections would also have .xml files. Finally, we would
pull all these together to form a single index.xml file. Some
directory structure wouldn't hurt. So how about::
/trunk/data/ ....................... root of data svn
index.xml ........................ main index file
src/ ............................. python scripts
packages/ ........................ dir for packages
corpora/ ....................... zip & xml files for corpora
grammars/ ...................... zip & xml files for grammars
taggers/ ....................... zip & xml files for taggers
tokenizers/ .................... zip & xml files for tokenizers
etc.
collections/ ..................... xml files for collections
Where the root (/trunk/data) would contain a makefile; and src/
would contain a script to update the info.xml file. It could also
contain scripts to rebuild some of the various model files. The
script that builds index.xml should probably check that each zip
file expands entirely into a single subdir, whose name matches the
package's uid.
Changes I need to make:
- in index: change "size" to "filesize" or "compressed-size"
- in index: add "unzipped-size"
- when checking status: check both compressed & uncompressed size.
uncompressed size is important to make sure we detect a problem
if something got partially unzipped. define new status values
to differentiate stale vs corrupt vs corruptly-uncompressed??
(we shouldn't need to re-download the file if the zip file is ok
but it didn't get uncompressed fully.)
- add other fields to the index: author, license, copyright, contact,
etc.
the current grammars/ package would become a single new package (eg
toy-grammars or book-grammars).
xml file should have:
- authorship info
- license info
- copyright info
- contact info
- info about what type of data/annotation it contains?
- recommended corpus reader?
collections can contain other collections. they can also contain
multiple package types (corpora & models). Have a single 'basics'
package that includes everything we talk about in the book?
n.b.: there will have to be a fallback to the punkt tokenizer, in case
they didn't download that model.
default: unzip or not?
"""
import time, os, zipfile, sys, textwrap, threading, itertools, shutil, functools
import subprocess
from hashlib import md5
from xml.etree import ElementTree
try:
TKINTER = True
from six.moves.tkinter import (
Tk,
Frame,
Label,
Entry,
Button,
Canvas,
Menu,
IntVar,
TclError,
)
from six.moves.tkinter_messagebox import showerror
from nltk.draw.table import Table
from nltk.draw.util import ShowText
except ImportError:
TKINTER = False
TclError = ValueError
from six import string_types, text_type
from six.moves import input
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError, URLError
import nltk
from nltk.compat import python_2_unicode_compatible
# urllib2 = nltk.internals.import_from_stdlib('urllib2')
######################################################################
# Directory entry objects (from the data server's index file)
######################################################################
@python_2_unicode_compatible
class Package(object):
    """
    A directory entry for a downloadable package.  These entries are
    extracted from the XML index file that is downloaded by
    ``Downloader``.  Each package consists of a single file; but if
    that file is a zip file, then it can be automatically decompressed
    when the package is installed.
    """

    def __init__(
        self,
        id,
        url,
        name=None,
        subdir='',
        size=None,
        unzipped_size=None,
        checksum=None,
        svn_revision=None,
        copyright='Unknown',
        contact='Unknown',
        license='Unknown',
        author='Unknown',
        unzip=True,
        **kw
    ):
        self.id = id
        """A unique identifier for this package."""
        self.name = name or id
        """A string name for this package."""
        self.subdir = subdir
        """The subdirectory where this package should be installed.
        E.g., ``'corpora'`` or ``'taggers'``."""
        self.url = url
        """A URL that can be used to download this package's file."""
        # Sizes arrive from the XML index as strings; convert them to int.
        # If a size was not supplied, keep None rather than raising
        # TypeError from int(None) (the declared defaults were unusable).
        self.size = int(size) if size is not None else None
        """The filesize (in bytes) of the package file."""
        self.unzipped_size = int(unzipped_size) if unzipped_size is not None else None
        """The total filesize of the files contained in the package's
        zipfile."""
        self.checksum = checksum
        """The MD-5 checksum of the package file."""
        self.svn_revision = svn_revision
        """A subversion revision number for this package."""
        self.copyright = copyright
        """Copyright holder for this package."""
        self.contact = contact
        """Name & email of the person who should be contacted with
        questions about this package."""
        self.license = license
        """License information for this package."""
        self.author = author
        """Author of this package."""
        # The local filename reuses the extension of the URL's last path
        # component, e.g. 'corpora' + 'abc' + '.zip' -> 'corpora/abc.zip'.
        ext = os.path.splitext(url.split('/')[-1])[1]
        self.filename = os.path.join(subdir, id + ext)
        """The filename that should be used for this package's file.  It
        is formed by joining ``self.subdir`` with ``self.id``, and
        using the same extension as ``url``."""
        self.unzip = bool(int(unzip))  # '0' or '1'
        """A flag indicating whether this corpus should be unzipped by
        default."""
        # Include any other attributes provided by the XML file.
        self.__dict__.update(kw)

    @staticmethod
    def fromxml(xml):
        """Build a ``Package`` from a ``<package>`` XML element, or from
        the path of a file containing one.  Attribute values are decoded
        to text before being passed to the constructor."""
        if isinstance(xml, string_types):
            xml = ElementTree.parse(xml)
        for key in xml.attrib:
            xml.attrib[key] = text_type(xml.attrib[key])
        return Package(**xml.attrib)

    def __lt__(self, other):
        return self.id < other.id

    def __repr__(self):
        return '<Package %s>' % self.id
@python_2_unicode_compatible
class Collection(object):
    """
    A directory entry for a collection of downloadable packages.
    These entries are extracted from the XML index file that is
    downloaded by ``Downloader``.
    """

    def __init__(self, id, children, name=None, **kw):
        # Unique identifier for this collection.
        self.id = id
        # Human-readable name; falls back to the identifier.
        self.name = name or id
        # Collections/Packages directly contained by this collection.
        self.children = children
        # Packages contained transitively; populated later by the
        # Downloader once the whole index has been parsed.
        self.packages = None
        # Carry through any extra attributes supplied by the XML index.
        for key, value in kw.items():
            setattr(self, key, value)

    @staticmethod
    def fromxml(xml):
        """Build a ``Collection`` from a ``<collection>`` XML element, or
        from the path of a file containing one."""
        if isinstance(xml, string_types):
            xml = ElementTree.parse(xml)
        for key in list(xml.attrib):
            xml.attrib[key] = text_type(xml.attrib[key])
        refs = [item.get('ref') for item in xml.findall('item')]
        return Collection(children=refs, **xml.attrib)

    def __lt__(self, other):
        return self.id < other.id

    def __repr__(self):
        return '<Collection %s>' % self.id
######################################################################
# Message Passing Objects
######################################################################
class DownloaderMessage(object):
    """A status message object, used by ``incr_download`` to
    communicate its progress."""
    # Base marker class only; each concrete subclass below carries one
    # payload attribute (a package, a collection, a progress value, or
    # a download directory).
class StartCollectionMessage(DownloaderMessage):
    """Data server has started working on a collection of packages."""
    def __init__(self, collection):
        # The Collection now being processed.
        self.collection = collection
class FinishCollectionMessage(DownloaderMessage):
    """Data server has finished working on a collection of packages."""
    def __init__(self, collection):
        # The Collection that was just completed.
        self.collection = collection
class StartPackageMessage(DownloaderMessage):
    """Data server has started working on a package."""
    def __init__(self, package):
        # The Package now being processed.
        self.package = package
class FinishPackageMessage(DownloaderMessage):
    """Data server has finished working on a package."""
    def __init__(self, package):
        # The Package that was just completed.
        self.package = package
class StartDownloadMessage(DownloaderMessage):
    """Data server has started downloading a package."""
    def __init__(self, package):
        # The Package whose file transfer has begun.
        self.package = package
class FinishDownloadMessage(DownloaderMessage):
    """Data server has finished downloading a package."""
    def __init__(self, package):
        # The Package whose file transfer has completed.
        self.package = package
class StartUnzipMessage(DownloaderMessage):
    """Data server has started unzipping a package."""
    def __init__(self, package):
        # The Package being decompressed.
        self.package = package
class FinishUnzipMessage(DownloaderMessage):
    """Data server has finished unzipping a package."""
    def __init__(self, package):
        # The Package that finished decompressing.
        self.package = package
class UpToDateMessage(DownloaderMessage):
    """The package download file is already up-to-date"""
    def __init__(self, package):
        # The Package that needed no work.
        self.package = package
class StaleMessage(DownloaderMessage):
    """The package download file is out-of-date or corrupt"""
    def __init__(self, package):
        # The Package that must be re-downloaded.
        self.package = package
class ErrorMessage(DownloaderMessage):
    """Data server encountered an error"""
    def __init__(self, package, message):
        # The Package (or None) associated with the error.
        self.package = package
        # Normalize exceptions to their string form so consumers can
        # always treat ``message`` as text.
        if isinstance(message, Exception):
            self.message = str(message)
        else:
            self.message = message
class ProgressMessage(DownloaderMessage):
    """Indicates how much progress the data server has made"""
    def __init__(self, progress):
        # Progress as a percentage in the range 0-100.
        self.progress = progress
class SelectDownloadDirMessage(DownloaderMessage):
    """Indicates what download directory the data server is using"""
    def __init__(self, download_dir):
        # Path of the directory packages will be written to.
        self.download_dir = download_dir
######################################################################
# NLTK Data Server
######################################################################
class Downloader(object):
    """
    A class used to access the NLTK data server, which can be used to
    download corpora and other data packages.
    """

    # /////////////////////////////////////////////////////////////////
    # Configuration
    # /////////////////////////////////////////////////////////////////
    INDEX_TIMEOUT = 60 * 60  # 1 hour
    """The amount of time after which the cached copy of the data
    server index will be considered 'stale,' and will be
    re-downloaded."""
    DEFAULT_URL = 'https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml'
    """The default URL for the NLTK data server's index.  An
    alternative URL can be specified when creating a new
    ``Downloader`` object."""
    # /////////////////////////////////////////////////////////////////
    # Status Constants
    # /////////////////////////////////////////////////////////////////
    INSTALLED = 'installed'
    """A status string indicating that a package or collection is
    installed and up-to-date."""
    NOT_INSTALLED = 'not installed'
    """A status string indicating that a package or collection is
    not installed."""
    STALE = 'out of date'
    """A status string indicating that a package or collection is
    corrupt or out-of-date."""
    PARTIAL = 'partial'
    """A status string indicating that a collection is partially
    installed (i.e., only some of its packages are installed.)"""
    # /////////////////////////////////////////////////////////////////
    # Constructor
    # /////////////////////////////////////////////////////////////////
    def __init__(self, server_index_url=None, download_dir=None):
        """Create a downloader for the index at ``server_index_url``
        (default: ``DEFAULT_URL``), saving packages to ``download_dir``
        (default: the first writable entry of ``default_download_dir()``)."""
        self._url = server_index_url or self.DEFAULT_URL
        """The URL for the data server's index file."""
        self._collections = {}
        """Dictionary from collection identifier to ``Collection``"""
        self._packages = {}
        """Dictionary from package identifier to ``Package``"""
        self._download_dir = download_dir
        """The default directory to which packages will be downloaded."""
        self._index = None
        """The XML index file downloaded from the data server"""
        self._index_timestamp = None
        """Time at which ``self._index`` was downloaded.  If it is more
        than ``INDEX_TIMEOUT`` seconds old, it will be re-downloaded."""
        self._status_cache = {}
        """Dictionary from package/collection identifier to status
        string (``INSTALLED``, ``NOT_INSTALLED``, ``STALE``, or
        ``PARTIAL``).  Cache is used for packages only, not
        collections."""
        self._errors = None
        """Flag for telling if all packages got successfully downloaded or not."""
        # NOTE(review): this flag is set to True on the first error and
        # never reset, so later collections in the same session also
        # report "with errors" -- confirm whether that is intended.
        # decide where we're going to save things to.
        if self._download_dir is None:
            self._download_dir = self.default_download_dir()

    # /////////////////////////////////////////////////////////////////
    # Information
    # /////////////////////////////////////////////////////////////////
    def list(
        self,
        download_dir=None,
        show_packages=True,
        show_collections=True,
        header=True,
        more_prompt=False,
        skip_installed=False,
    ):
        """Print an inventory of packages and/or collections together
        with their installation status, paging every ~20 lines when
        ``more_prompt`` is true."""
        lines = 0  # for more_prompt
        if download_dir is None:
            download_dir = self._download_dir
            print('Using default data directory (%s)' % download_dir)
        if header:
            print('=' * (26 + len(self._url)))
            print(' Data server index for <%s>' % self._url)
            print('=' * (26 + len(self._url)))
            lines += 3  # for more_prompt
        stale = partial = False
        categories = []
        if show_packages:
            categories.append('packages')
        if show_collections:
            categories.append('collections')
        for category in categories:
            print('%s:' % category.capitalize())
            lines += 1  # for more_prompt
            for info in sorted(getattr(self, category)(), key=str):
                status = self.status(info, download_dir)
                if status == self.INSTALLED and skip_installed:
                    continue
                if status == self.STALE:
                    stale = True
                if status == self.PARTIAL:
                    partial = True
                prefix = {
                    self.INSTALLED: '*',
                    self.STALE: '-',
                    self.PARTIAL: 'P',
                    self.NOT_INSTALLED: ' ',
                }[status]
                # The '-'*27 padding makes textwrap indent continuation
                # lines under the name column; it is sliced off below.
                name = textwrap.fill(
                    '-' * 27 + (info.name or info.id), 75, subsequent_indent=27 * ' '
                )[27:]
                print(' [%s] %s %s' % (prefix, info.id.ljust(20, '.'), name))
                lines += len(name.split('\n'))  # for more_prompt
                if more_prompt and lines > 20:
                    user_input = input("Hit Enter to continue: ")
                    if user_input.lower() in ('x', 'q'):
                        return
                    lines = 0
            print()
        msg = '([*] marks installed packages'
        if stale:
            msg += '; [-] marks out-of-date or corrupt packages'
        if partial:
            msg += '; [P] marks partially installed collections'
        print(textwrap.fill(msg + ')', subsequent_indent=' ', width=76))

    def packages(self):
        """Return all ``Package`` records from the (cached) index."""
        self._update_index()
        return self._packages.values()

    def corpora(self):
        """Return the packages whose subdir is 'corpora'."""
        self._update_index()
        return [pkg for (id, pkg) in self._packages.items() if pkg.subdir == 'corpora']

    def models(self):
        """Return the packages whose subdir is anything but 'corpora'."""
        self._update_index()
        return [pkg for (id, pkg) in self._packages.items() if pkg.subdir != 'corpora']

    def collections(self):
        """Return all ``Collection`` records from the (cached) index."""
        self._update_index()
        return self._collections.values()

    # /////////////////////////////////////////////////////////////////
    # Downloading
    # /////////////////////////////////////////////////////////////////
    def _info_or_id(self, info_or_id):
        """Resolve a string identifier to its index record; pass through
        anything that is already a ``Package``/``Collection``."""
        if isinstance(info_or_id, string_types):
            return self.info(info_or_id)
        else:
            return info_or_id

    # [xx] When during downloading is it 'safe' to abort?  Only unsafe
    # time is *during* an unzip -- we don't want to leave a
    # partially-unzipped corpus in place because we wouldn't notice
    # it.  But if we had the exact total size of the unzipped corpus,
    # then that would be fine.  Then we could abort anytime we want!
    # So this is really what we should do.  That way the threaded
    # downloader in the gui can just kill the download thread anytime
    # it wants.
    def incr_download(self, info_or_id, download_dir=None, force=False):
        """Generator that downloads ``info_or_id`` (an id, record, or a
        list of them), yielding ``DownloaderMessage`` objects describing
        progress as it goes."""
        # If they didn't specify a download_dir, then use the default one.
        if download_dir is None:
            download_dir = self._download_dir
            yield SelectDownloadDirMessage(download_dir)
        # If they gave us a list of ids, then download each one.
        if isinstance(info_or_id, (list, tuple)):
            for msg in self._download_list(info_or_id, download_dir, force):
                yield msg
            return
        # Look up the requested collection or package.
        try:
            info = self._info_or_id(info_or_id)
        except (IOError, ValueError) as e:
            yield ErrorMessage(None, 'Error loading %s: %s' % (info_or_id, e))
            return
        # Handle collections.
        if isinstance(info, Collection):
            yield StartCollectionMessage(info)
            for msg in self.incr_download(info.children, download_dir, force):
                yield msg
            yield FinishCollectionMessage(info)
        # Handle Packages (delegate to a helper function).
        else:
            for msg in self._download_package(info, download_dir, force):
                yield msg

    def _num_packages(self, item):
        """Number of packages represented by ``item`` (1 for a Package,
        the transitive package count for a Collection)."""
        if isinstance(item, Package):
            return 1
        else:
            return len(item.packages)

    def _download_list(self, items, download_dir, force):
        """Download each item in ``items``, re-scaling the per-item
        progress so the combined progress spans 0-100."""
        # Look up the requested items.
        for i in range(len(items)):
            try:
                items[i] = self._info_or_id(items[i])
            except (IOError, ValueError) as e:
                yield ErrorMessage(items[i], e)
                return
        # Download each item, re-scaling their progress.
        num_packages = sum(self._num_packages(item) for item in items)
        progress = 0
        for i, item in enumerate(items):
            if isinstance(item, Package):
                delta = 1.0 / num_packages
            else:
                delta = len(item.packages) / num_packages
            for msg in self.incr_download(item, download_dir, force):
                if isinstance(msg, ProgressMessage):
                    yield ProgressMessage(progress + msg.progress * delta)
                else:
                    yield msg
            progress += 100 * delta

    def _download_package(self, info, download_dir, force):
        """Download (and, for zip packages, unzip) a single ``Package``,
        yielding progress messages.  Skips work when the package is
        already installed and ``force`` is false."""
        yield StartPackageMessage(info)
        yield ProgressMessage(0)
        # Do we already have the current version?
        status = self.status(info, download_dir)
        if not force and status == self.INSTALLED:
            yield UpToDateMessage(info)
            yield ProgressMessage(100)
            yield FinishPackageMessage(info)
            return
        # Remove the package from our status cache
        self._status_cache.pop(info.id, None)
        # Check for (and remove) any old/stale version.
        filepath = os.path.join(download_dir, info.filename)
        if os.path.exists(filepath):
            if status == self.STALE:
                yield StaleMessage(info)
            os.remove(filepath)
        # Ensure the download_dir exists
        if not os.path.exists(download_dir):
            os.mkdir(download_dir)
        if not os.path.exists(os.path.join(download_dir, info.subdir)):
            os.mkdir(os.path.join(download_dir, info.subdir))
        # Download the file.  This will raise an IOError if the url
        # is not found.
        yield StartDownloadMessage(info)
        yield ProgressMessage(5)
        try:
            infile = urlopen(info.url)
            # Close the connection even if reading fails partway
            # through (previously the handle leaked on IOError).
            try:
                with open(filepath, 'wb') as outfile:
                    num_blocks = max(1, info.size / (1024 * 16))
                    for block in itertools.count():
                        s = infile.read(1024 * 16)  # 16k blocks.
                        outfile.write(s)
                        if not s:
                            break
                        if block % 2 == 0:  # how often?
                            yield ProgressMessage(min(80, 5 + 75 * (block / num_blocks)))
            finally:
                infile.close()
        except IOError as e:
            yield ErrorMessage(
                info,
                'Error downloading %r from <%s>:' '\n  %s' % (info.id, info.url, e),
            )
            return
        yield FinishDownloadMessage(info)
        yield ProgressMessage(80)
        # If it's a zipfile, uncompress it.
        if info.filename.endswith('.zip'):
            zipdir = os.path.join(download_dir, info.subdir)
            # Unzip if we're unzipping by default; *or* if it's already
            # been unzipped (presumably a previous version).
            if info.unzip or os.path.exists(os.path.join(zipdir, info.id)):
                yield StartUnzipMessage(info)
                for msg in _unzip_iter(filepath, zipdir, verbose=False):
                    # Somewhat of a hack, but we need a proper package reference
                    msg.package = info
                    yield msg
                yield FinishUnzipMessage(info)
        yield FinishPackageMessage(info)

    def download(
        self,
        info_or_id=None,
        download_dir=None,
        quiet=False,
        force=False,
        prefix='[nltk_data] ',
        halt_on_error=True,
        raise_on_error=False,
        print_error_to=sys.stderr,
    ):
        """Download the package/collection named by ``info_or_id`` (or
        open the interactive downloader when it is None).  Returns True
        on success, False on error (unless ``raise_on_error``)."""
        print_to = functools.partial(print, file=print_error_to)
        # If no info or id is given, then use the interactive shell.
        if info_or_id is None:
            # [xx] hmm -- changing self._download_dir here seems like
            # the wrong thing to do.  Maybe the _interactive_download
            # function should make a new copy of self to use?
            if download_dir is not None:
                self._download_dir = download_dir
            self._interactive_download()
            return True
        else:
            # Define a helper function for displaying output:
            def show(s, prefix2=''):
                print_to(
                    textwrap.fill(
                        s,
                        initial_indent=prefix + prefix2,
                        subsequent_indent=prefix + prefix2 + ' ' * 4,
                    )
                )

            for msg in self.incr_download(info_or_id, download_dir, force):
                # Error messages
                if isinstance(msg, ErrorMessage):
                    show(msg.message)
                    if raise_on_error:
                        raise ValueError(msg.message)
                    if halt_on_error:
                        return False
                    self._errors = True
                    if not quiet:
                        print_to("Error installing package. Retry? [n/y/e]")
                        choice = input().strip()
                        if choice in ['y', 'Y']:
                            if not self.download(
                                msg.package.id,
                                download_dir,
                                quiet,
                                force,
                                prefix,
                                halt_on_error,
                                raise_on_error,
                            ):
                                return False
                        elif choice in ['e', 'E']:
                            return False
                # All other messages
                if not quiet:
                    # Collection downloading messages:
                    if isinstance(msg, StartCollectionMessage):
                        show('Downloading collection %r' % msg.collection.id)
                        # Indent nested output; popped again (4 chars) on
                        # the matching FinishCollectionMessage below.
                        prefix += '   | '
                        print_to(prefix)
                    elif isinstance(msg, FinishCollectionMessage):
                        print_to(prefix)
                        prefix = prefix[:-4]
                        if self._errors:
                            show(
                                'Downloaded collection %r with errors'
                                % msg.collection.id
                            )
                        else:
                            show('Done downloading collection %s' % msg.collection.id)
                    # Package downloading messages:
                    elif isinstance(msg, StartPackageMessage):
                        show(
                            'Downloading package %s to %s...'
                            % (msg.package.id, download_dir)
                        )
                    elif isinstance(msg, UpToDateMessage):
                        show('Package %s is already up-to-date!' % msg.package.id, '  ')
                    # elif isinstance(msg, StaleMessage):
                    #    show('Package %s is out-of-date or corrupt' %
                    #         msg.package.id, '  ')
                    elif isinstance(msg, StartUnzipMessage):
                        show('Unzipping %s.' % msg.package.filename, '  ')
                    # Data directory message:
                    elif isinstance(msg, SelectDownloadDirMessage):
                        download_dir = msg.download_dir
            return True

    def is_stale(self, info_or_id, download_dir=None):
        """True iff the given package/collection has status ``STALE``."""
        return self.status(info_or_id, download_dir) == self.STALE

    def is_installed(self, info_or_id, download_dir=None):
        """True iff the given package/collection has status ``INSTALLED``."""
        return self.status(info_or_id, download_dir) == self.INSTALLED

    def clear_status_cache(self, id=None):
        """Forget the cached status for ``id`` (or for everything)."""
        if id is None:
            self._status_cache.clear()
        else:
            self._status_cache.pop(id, None)

    def status(self, info_or_id, download_dir=None):
        """
        Return a constant describing the status of the given package
        or collection.  Status can be one of ``INSTALLED``,
        ``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``.
        """
        if download_dir is None:
            download_dir = self._download_dir
        info = self._info_or_id(info_or_id)
        # Handle collections:
        if isinstance(info, Collection):
            pkg_status = [self.status(pkg.id) for pkg in info.packages]
            if self.STALE in pkg_status:
                return self.STALE
            elif self.PARTIAL in pkg_status:
                return self.PARTIAL
            elif self.INSTALLED in pkg_status and self.NOT_INSTALLED in pkg_status:
                return self.PARTIAL
            elif self.NOT_INSTALLED in pkg_status:
                return self.NOT_INSTALLED
            else:
                return self.INSTALLED
        # Handle packages:
        else:
            filepath = os.path.join(download_dir, info.filename)
            # Only cache results for the default download directory;
            # a custom directory is checked fresh every time.
            if download_dir != self._download_dir:
                return self._pkg_status(info, filepath)
            else:
                if info.id not in self._status_cache:
                    self._status_cache[info.id] = self._pkg_status(info, filepath)
                return self._status_cache[info.id]

    def _pkg_status(self, info, filepath):
        """Compute the status of a single package by checking the file's
        existence, size, MD5 checksum, and (for zips) unzipped size."""
        if not os.path.exists(filepath):
            return self.NOT_INSTALLED
        # Check if the file has the correct size.
        try:
            filestat = os.stat(filepath)
        except OSError:
            return self.NOT_INSTALLED
        if filestat.st_size != int(info.size):
            return self.STALE
        # Check if the file's checksum matches
        if md5_hexdigest(filepath) != info.checksum:
            return self.STALE
        # If it's a zipfile, and it's been at least partially
        # unzipped, then check if it's been fully unzipped.
        if filepath.endswith('.zip'):
            unzipdir = filepath[:-4]
            if not os.path.exists(unzipdir):
                return self.INSTALLED  # but not unzipped -- ok!
            if not os.path.isdir(unzipdir):
                return self.STALE
            unzipped_size = sum(
                os.stat(os.path.join(d, f)).st_size
                for d, _, files in os.walk(unzipdir)
                for f in files
            )
            if unzipped_size != info.unzipped_size:
                return self.STALE
        # Otherwise, everything looks good.
        return self.INSTALLED

    def update(self, quiet=False, prefix='[nltk_data] '):
        """
        Re-download any packages whose status is STALE.
        """
        self.clear_status_cache()
        for pkg in self.packages():
            if self.status(pkg) == self.STALE:
                self.download(pkg, quiet=quiet, prefix=prefix)

    # /////////////////////////////////////////////////////////////////
    # Index
    # /////////////////////////////////////////////////////////////////
    def _update_index(self, url=None):
        """A helper function that ensures that self._index is
        up-to-date.  If the index is older than self.INDEX_TIMEOUT,
        then download it again."""
        # Check if the index is already up-to-date.  If so, do nothing.
        if not (
            self._index is None
            or url is not None
            or time.time() - self._index_timestamp > self.INDEX_TIMEOUT
        ):
            return
        # If a URL was specified, then update our URL.
        self._url = url or self._url
        # Download the index file.
        self._index = nltk.internals.ElementWrapper(
            ElementTree.parse(urlopen(self._url)).getroot()
        )
        self._index_timestamp = time.time()
        # Build a dictionary of packages.
        packages = [Package.fromxml(p) for p in self._index.findall('packages/package')]
        self._packages = dict((p.id, p) for p in packages)
        # Build a dictionary of collections.
        collections = [
            Collection.fromxml(c) for c in self._index.findall('collections/collection')
        ]
        self._collections = dict((c.id, c) for c in collections)
        # Replace identifiers with actual children in collection.children.
        # (Build a new list instead of deleting during iteration: the old
        # ``del collection.children[i]`` inside ``enumerate`` skipped the
        # element that followed every dangling reference.)
        for collection in self._collections.values():
            resolved_children = []
            for child_id in collection.children:
                if child_id in self._packages:
                    resolved_children.append(self._packages[child_id])
                elif child_id in self._collections:
                    resolved_children.append(self._collections[child_id])
                else:
                    print(
                        'removing collection member with no package: {}'.format(
                            child_id
                        )
                    )
            collection.children = resolved_children
        # Fill in collection.packages for each collection.
        for collection in self._collections.values():
            packages = {}
            queue = [collection]
            for child in queue:
                if isinstance(child, Collection):
                    queue.extend(child.children)
                elif isinstance(child, Package):
                    packages[child.id] = child
                else:
                    pass
            collection.packages = packages.values()
        # Flush the status cache
        self._status_cache.clear()

    def index(self):
        """
        Return the XML index describing the packages available from
        the data server.  If necessary, this index will be downloaded
        from the data server.
        """
        self._update_index()
        return self._index

    def info(self, id):
        """Return the ``Package`` or ``Collection`` record for the
        given item."""
        self._update_index()
        if id in self._packages:
            return self._packages[id]
        if id in self._collections:
            return self._collections[id]
        raise ValueError('Package %r not found in index' % id)

    def xmlinfo(self, id):
        """Return the XML info record for the given item"""
        self._update_index()
        for package in self._index.findall('packages/package'):
            if package.get('id') == id:
                return package
        for collection in self._index.findall('collections/collection'):
            if collection.get('id') == id:
                return collection
        raise ValueError('Package %r not found in index' % id)

    # /////////////////////////////////////////////////////////////////
    # URL & Data Directory
    # /////////////////////////////////////////////////////////////////
    def _get_url(self):
        """The URL for the data server's index file."""
        return self._url

    def _set_url(self, url):
        """
        Set a new URL for the data server.  If we're unable to contact
        the given url, then the original url is kept.
        """
        original_url = self._url
        try:
            self._update_index(url)
        except:
            # Deliberately broad: restore the previous URL on *any*
            # failure, then re-raise to the caller.
            self._url = original_url
            raise

    url = property(_get_url, _set_url)

    def default_download_dir(self):
        """
        Return the directory to which packages will be downloaded by
        default.  This value can be overridden using the constructor,
        or on a case-by-case basis using the ``download_dir`` argument when
        calling ``download()``.
        On Windows, the default download directory is
        ``PYTHONHOME/lib/nltk``, where *PYTHONHOME* is the
        directory containing Python, e.g. ``C:\\Python25``.
        On all other platforms, the default directory is the first of
        the following which exists or which can be created with write
        permission: ``/usr/share/nltk_data``, ``/usr/local/share/nltk_data``,
        ``/usr/lib/nltk_data``, ``/usr/local/lib/nltk_data``, ``~/nltk_data``.
        """
        # Check if we are on GAE where we cannot write into filesystem.
        if 'APPENGINE_RUNTIME' in os.environ:
            return
        # Check if we have sufficient permissions to install in a
        # variety of system-wide locations.
        for nltkdir in nltk.data.path:
            if os.path.exists(nltkdir) and nltk.internals.is_writable(nltkdir):
                return nltkdir
        # On Windows, use %APPDATA%
        if sys.platform == 'win32' and 'APPDATA' in os.environ:
            homedir = os.environ['APPDATA']
        # Otherwise, install in the user's home directory.
        else:
            homedir = os.path.expanduser('~/')
            if homedir == '~/':
                raise ValueError("Could not find a default download directory")
        # append "nltk_data" to the home directory
        return os.path.join(homedir, 'nltk_data')

    def _get_download_dir(self):
        """
        The default directory to which packages will be downloaded.
        This defaults to the value returned by ``default_download_dir()``.
        To override this default on a case-by-case basis, use the
        ``download_dir`` argument when calling ``download()``.
        """
        return self._download_dir

    def _set_download_dir(self, download_dir):
        self._download_dir = download_dir
        # Clear the status cache.
        self._status_cache.clear()

    download_dir = property(_get_download_dir, _set_download_dir)

    # /////////////////////////////////////////////////////////////////
    # Interactive Shell
    # /////////////////////////////////////////////////////////////////
    def _interactive_download(self):
        # Try the GUI first; if that doesn't work, try the simple
        # interactive shell.
        if TKINTER:
            try:
                DownloaderGUI(self).mainloop()
            except TclError:
                DownloaderShell(self).run()
        else:
            DownloaderShell(self).run()
class DownloaderShell(object):
def __init__(self, dataserver):
self._ds = dataserver
def _simple_interactive_menu(self, *options):
print('-' * 75)
spc = (68 - sum(len(o) for o in options)) // (len(options) - 1) * ' '
print(' ' + spc.join(options))
# w = 76/len(options)
# fmt = ' ' + ('%-'+str(w)+'s')*(len(options)-1) + '%s'
# print fmt % options
print('-' * 75)
def run(self):
print('NLTK Downloader')
while True:
self._simple_interactive_menu(
'd) Download',
'l) List',
' u) Update',
'c) Config',
'h) Help',
'q) Quit',
)
user_input = input('Downloader> ').strip()
if not user_input:
print()
continue
command = user_input.lower().split()[0]
args = user_input.split()[1:]
try:
if command == 'l':
print()
self._ds.list(self._ds.download_dir, header=False, more_prompt=True)
elif command == 'h':
self._simple_interactive_help()
elif command == 'c':
self._simple_interactive_config()
elif command in ('q', 'x'):
return
elif command == 'd':
self._simple_interactive_download(args)
elif command == 'u':
self._simple_interactive_update()
else:
print('Command %r unrecognized' % user_input)
except HTTPError as e:
print('Error reading from server: %s' % e)
except URLError as e:
print('Error connecting to server: %s' % e.reason)
# try checking if user_input is a package name, &
# downloading it?
print()
def _simple_interactive_download(self, args):
if args:
for arg in args:
try:
self._ds.download(arg, prefix=' ')
except (IOError, ValueError) as e:
print(e)
else:
while True:
print()
print('Download which package (l=list; x=cancel)?')
user_input = input(' Identifier> ')
if user_input.lower() == 'l':
self._ds.list(
self._ds.download_dir,
header=False,
more_prompt=True,
skip_installed=True,
)
continue
elif user_input.lower() in ('x', 'q', ''):
return
elif user_input:
for id in user_input.split():
try:
self._ds.download(id, prefix=' ')
except (IOError, ValueError) as e:
print(e)
break
def _simple_interactive_update(self):
while True:
stale_packages = []
stale = partial = False
for info in sorted(getattr(self._ds, 'packages')(), key=str):
if self._ds.status(info) == self._ds.STALE:
stale_packages.append((info.id, info.name))
print()
if stale_packages:
print('Will update following packages (o=ok; x=cancel)')
for pid, pname in stale_packages:
name = textwrap.fill(
'-' * 27 + (pname), 75, subsequent_indent=27 * ' '
)[27:]
print(' [ ] %s %s' % (pid.ljust(20, '.'), name))
print()
user_input = input(' Identifier> ')
if user_input.lower() == 'o':
for pid, pname in stale_packages:
try:
self._ds.download(pid, prefix=' ')
except (IOError, ValueError) as e:
print(e)
break
elif user_input.lower() in ('x', 'q', ''):
return
else:
print('Nothing to update.')
return
def _simple_interactive_help(self):
print()
print('Commands:')
print(
' d) Download a package or collection u) Update out of date packages'
)
print(' l) List packages & collections h) Help')
print(' c) View & Modify Configuration q) Quit')
def _show_config(self):
print()
print('Data Server:')
print(' - URL: <%s>' % self._ds.url)
print((' - %d Package Collections Available' % len(self._ds.collections())))
print((' - %d Individual Packages Available' % len(self._ds.packages())))
print()
print('Local Machine:')
print(' - Data directory: %s' % self._ds.download_dir)
    def _simple_interactive_config(self):
        """Interactive sub-menu for viewing and changing the server URL
        and the download directory."""
        self._show_config()
        while True:
            print()
            self._simple_interactive_menu(
                's) Show Config', 'u) Set Server URL', 'd) Set Data Dir', 'm) Main Menu'
            )
            user_input = input('Config> ').strip().lower()
            if user_input == 's':
                self._show_config()
            elif user_input == 'd':
                new_dl_dir = input(' New Directory> ').strip()
                if new_dl_dir in ('', 'x', 'q', 'X', 'Q'):
                    print(' Cancelled!')
                elif os.path.isdir(new_dl_dir):
                    self._ds.download_dir = new_dl_dir
                else:
                    # Refuse to point at a directory that doesn't exist.
                    print(('Directory %r not found! Create it first.' % new_dl_dir))
            elif user_input == 'u':
                new_url = input(' New URL> ').strip()
                if new_url in ('', 'x', 'q', 'X', 'Q'):
                    print(' Cancelled!')
                else:
                    # Assume http:// when the user omitted the scheme.
                    if not new_url.startswith(('http://', 'https://')):
                        new_url = 'http://' + new_url
                    try:
                        self._ds.url = new_url
                    except Exception as e:
                        print('Error reading <%r>:\n %s' % (new_url, e))
            elif user_input == 'm':
                break
class DownloaderGUI(object):
    """
    Graphical interface for downloading packages from the NLTK data
    server.
    """

    # /////////////////////////////////////////////////////////////////
    # Column Configuration
    # /////////////////////////////////////////////////////////////////

    COLUMNS = [
        '',
        'Identifier',
        'Name',
        'Size',
        'Status',
        'Unzipped Size',
        'Copyright',
        'Contact',
        'License',
        'Author',
        'Subdir',
        'Checksum',
    ]
    """A list of the names of columns. This controls the order in
    which the columns will appear. If this is edited, then
    ``_package_to_columns()`` may need to be edited to match."""

    COLUMN_WEIGHTS = {'': 0, 'Name': 5, 'Size': 0, 'Status': 0}
    """A dictionary specifying how columns should be resized when the
    table is resized. Columns with weight 0 will not be resized at
    all; and columns with high weight will be resized more.
    Default weight (for columns not explicitly listed) is 1."""

    COLUMN_WIDTHS = {
        '': 1,
        'Identifier': 20,
        'Name': 45,
        'Size': 10,
        'Unzipped Size': 10,
        'Status': 12,
    }
    """A dictionary specifying how wide each column should be, in
    characters. The default width (for columns not explicitly
    listed) is specified by ``DEFAULT_COLUMN_WIDTH``."""

    DEFAULT_COLUMN_WIDTH = 30
    """The default width for columns that are not explicitly listed
    in ``COLUMN_WIDTHS``."""

    INITIAL_COLUMNS = ['', 'Identifier', 'Name', 'Size', 'Status']
    """The set of columns that should be displayed by default."""

    # Perform a few import-time sanity checks to make sure that the
    # column configuration variables are defined consistently:
    # (these run once, when the class body is executed)
    for c in COLUMN_WEIGHTS:
        assert c in COLUMNS
    for c in COLUMN_WIDTHS:
        assert c in COLUMNS
    for c in INITIAL_COLUMNS:
        assert c in COLUMNS

    # /////////////////////////////////////////////////////////////////
    # Color Configuration
    # /////////////////////////////////////////////////////////////////

    # (foreground, background) for the window backdrop and progress label.
    _BACKDROP_COLOR = ('#000', '#ccc')

    # Per-status (background, selected-background) row colors; see
    # _color_table().
    _ROW_COLOR = {
        Downloader.INSTALLED: ('#afa', '#080'),
        Downloader.PARTIAL: ('#ffa', '#880'),
        Downloader.STALE: ('#faa', '#800'),
        Downloader.NOT_INSTALLED: ('#fff', '#888'),
    }

    # (foreground, background) for the mark ('X') column.
    _MARK_COLOR = ('#000', '#ccc')

    # _FRONT_TAB_COLOR = ('#ccf', '#008')
    # _BACK_TAB_COLOR = ('#88a', '#448')
    # (foreground, background) for the active / inactive tab labels.
    _FRONT_TAB_COLOR = ('#fff', '#45c')
    _BACK_TAB_COLOR = ('#aaa', '#67a')

    # (bar fill, canvas background) for the progress bar.
    _PROGRESS_COLOR = ('#f00', '#aaa')

    _TAB_FONT = 'helvetica -16 bold'
# /////////////////////////////////////////////////////////////////
# Constructor
# /////////////////////////////////////////////////////////////////
    def __init__(self, dataserver, use_threads=True):
        """Build the downloader window.

        :param dataserver: the ``Downloader`` that supplies package data.
        :param use_threads: if true, downloads run on a worker thread so
            the GUI stays responsive.
        """
        self._ds = dataserver
        self._use_threads = use_threads

        # For the threaded downloader:
        self._download_lock = threading.Lock()
        self._download_msg_queue = []
        self._download_abort_queue = []
        self._downloading = False

        # For tkinter after callbacks:
        self._afterid = {}

        # A message log.
        self._log_messages = []
        self._log_indent = 0
        self._log('NLTK Downloader Started!')

        # Create the main window.
        top = self.top = Tk()
        top.geometry('+50+50')
        top.title('NLTK Downloader')
        top.configure(background=self._BACKDROP_COLOR[1])

        # Set up some bindings now, in case anything goes wrong.
        top.bind('<Control-q>', self.destroy)
        top.bind('<Control-x>', self.destroy)
        self._destroyed = False

        self._column_vars = {}

        # Initialize the GUI.
        self._init_widgets()
        self._init_menu()
        try:
            self._fill_table()
        except HTTPError as e:
            showerror('Error reading from server', e)
        except URLError as e:
            showerror('Error connecting to server', e.reason)

        self._show_info()
        self._select_columns()
        self._table.select(0)

        # Make sure we get notified when we're destroyed, so we can
        # cancel any download in progress.
        self._table.bind('<Destroy>', self._destroy)
def _log(self, msg):
self._log_messages.append(
'%s %s%s' % (time.ctime(), ' | ' * self._log_indent, msg)
)
# /////////////////////////////////////////////////////////////////
# Internals
# /////////////////////////////////////////////////////////////////
    def _init_widgets(self):
        """Create and lay out all widgets: tabs, table, buttons, info
        entries, and the progress bar."""
        # Create the top-level frame structures
        f1 = Frame(self.top, relief='raised', border=2, padx=8, pady=0)
        # NOTE(review): 'sid' relies on Tcl's option abbreviation for
        # '-side'; consider spelling out side='top'.
        f1.pack(sid='top', expand=True, fill='both')
        f1.grid_rowconfigure(2, weight=1)
        f1.grid_columnconfigure(0, weight=1)
        Frame(f1, height=8).grid(column=0, row=0)  # spacer
        tabframe = Frame(f1)
        tabframe.grid(column=0, row=1, sticky='news')
        tableframe = Frame(f1)
        tableframe.grid(column=0, row=2, sticky='news')
        buttonframe = Frame(f1)
        buttonframe.grid(column=0, row=3, sticky='news')
        Frame(f1, height=8).grid(column=0, row=4)  # spacer
        infoframe = Frame(f1)
        infoframe.grid(column=0, row=5, sticky='news')
        Frame(f1, height=8).grid(column=0, row=6)  # spacer
        progressframe = Frame(
            self.top, padx=3, pady=3, background=self._BACKDROP_COLOR[1]
        )
        progressframe.pack(side='bottom', fill='x')
        self.top['border'] = 0
        self.top['highlightthickness'] = 0

        # Create the tabs
        self._tab_names = ['Collections', 'Corpora', 'Models', 'All Packages']
        self._tabs = {}
        for i, tab in enumerate(self._tab_names):
            label = Label(tabframe, text=tab, font=self._TAB_FONT)
            label.pack(side='left', padx=((i + 1) % 2) * 10)
            label.bind('<Button-1>', self._select_tab)
            self._tabs[tab.lower()] = label

        # Create the table.
        column_weights = [self.COLUMN_WEIGHTS.get(column, 1) for column in self.COLUMNS]
        self._table = Table(
            tableframe,
            self.COLUMNS,
            column_weights=column_weights,
            highlightthickness=0,
            listbox_height=16,
            reprfunc=self._table_reprfunc,
        )
        self._table.columnconfig(0, foreground=self._MARK_COLOR[0])  # marked
        for i, column in enumerate(self.COLUMNS):
            width = self.COLUMN_WIDTHS.get(column, self.DEFAULT_COLUMN_WIDTH)
            self._table.columnconfig(i, width=width)
        self._table.pack(expand=True, fill='both')
        self._table.focus()
        self._table.bind_to_listboxes('<Double-Button-1>', self._download)
        self._table.bind('<space>', self._table_mark)
        self._table.bind('<Return>', self._download)
        self._table.bind('<Left>', self._prev_tab)
        self._table.bind('<Right>', self._next_tab)
        self._table.bind('<Control-a>', self._mark_all)

        # Create entry boxes for URL & download_dir
        infoframe.grid_columnconfigure(1, weight=1)
        info = [
            ('url', 'Server Index:', self._set_url),
            ('download_dir', 'Download Directory:', self._set_download_dir),
        ]
        self._info = {}
        for (i, (key, label, callback)) in enumerate(info):
            Label(infoframe, text=label).grid(column=0, row=i, sticky='e')
            entry = Entry(
                infoframe, font='courier', relief='groove', disabledforeground='black'
            )
            self._info[key] = (entry, callback)
            entry.bind('<Return>', self._info_save)
            # Bind key= as a default arg so each lambda keeps its own key.
            entry.bind('<Button-1>', lambda e, key=key: self._info_edit(key))
            entry.grid(column=1, row=i, sticky='ew')

        # If the user edits url or download_dir, and then clicks outside
        # the entry box, then save their results.
        self.top.bind('<Button-1>', self._info_save)

        # Create Download & Refresh buttons.
        self._download_button = Button(
            buttonframe, text='Download', command=self._download, width=8
        )
        self._download_button.pack(side='left')
        self._refresh_button = Button(
            buttonframe, text='Refresh', command=self._refresh, width=8
        )
        self._refresh_button.pack(side='right')

        # Create Progress bar
        self._progresslabel = Label(
            progressframe,
            text='',
            foreground=self._BACKDROP_COLOR[0],
            background=self._BACKDROP_COLOR[1],
        )
        self._progressbar = Canvas(
            progressframe,
            width=200,
            height=16,
            background=self._PROGRESS_COLOR[1],
            relief='sunken',
            border=1,
        )
        self._init_progressbar()
        self._progressbar.pack(side='right')
        self._progresslabel.pack(side='left')
    def _init_menu(self):
        """Build the File / View / Sort / Help menu bar."""
        menubar = Menu(self.top)

        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(
            label='Download', underline=0, command=self._download, accelerator='Return'
        )
        filemenu.add_separator()
        filemenu.add_command(
            label='Change Server Index',
            underline=7,
            command=lambda: self._info_edit('url'),
        )
        filemenu.add_command(
            label='Change Download Directory',
            underline=0,
            command=lambda: self._info_edit('download_dir'),
        )
        filemenu.add_separator()
        filemenu.add_command(label='Show Log', underline=5, command=self._show_log)
        filemenu.add_separator()
        filemenu.add_command(
            label='Exit', underline=1, command=self.destroy, accelerator='Ctrl-x'
        )
        menubar.add_cascade(label='File', underline=0, menu=filemenu)

        # Create a menu to control which columns of the table are
        # shown. n.b.: we never hide the first two columns (mark and
        # identifier).
        viewmenu = Menu(menubar, tearoff=0)
        for column in self._table.column_names[2:]:
            var = IntVar(self.top)
            assert column not in self._column_vars
            self._column_vars[column] = var
            if column in self.INITIAL_COLUMNS:
                var.set(1)
            viewmenu.add_checkbutton(
                label=column, underline=0, variable=var, command=self._select_columns
            )
        menubar.add_cascade(label='View', underline=0, menu=viewmenu)

        # Create a sort menu
        # [xx] this should be selectbuttons; and it should include
        # reversed sorts as options.
        sortmenu = Menu(menubar, tearoff=0)
        for column in self._table.column_names[1:]:
            sortmenu.add_command(
                label='Sort by %s' % column,
                command=(lambda c=column: self._table.sort_by(c, 'ascending')),
            )
        sortmenu.add_separator()
        # sortmenu.add_command(label='Descending Sort:')
        for column in self._table.column_names[1:]:
            sortmenu.add_command(
                label='Reverse sort by %s' % column,
                command=(lambda c=column: self._table.sort_by(c, 'descending')),
            )
        menubar.add_cascade(label='Sort', underline=0, menu=sortmenu)

        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label='About', underline=0, command=self.about)
        helpmenu.add_command(
            label='Instructions', underline=0, command=self.help, accelerator='F1'
        )
        menubar.add_cascade(label='Help', underline=0, menu=helpmenu)
        self.top.bind('<F1>', self.help)

        self.top.config(menu=menubar)
def _select_columns(self):
for (column, var) in self._column_vars.items():
if var.get():
self._table.show_column(column)
else:
self._table.hide_column(column)
def _refresh(self):
self._ds.clear_status_cache()
try:
self._fill_table()
except HTTPError as e:
showerror('Error reading from server', e)
except URLError as e:
showerror('Error connecting to server', e.reason)
self._table.select(0)
def _info_edit(self, info_key):
self._info_save() # just in case.
(entry, callback) = self._info[info_key]
entry['state'] = 'normal'
entry['relief'] = 'sunken'
entry.focus()
    def _info_save(self, e=None):
        """Commit any in-progress edit of the url/download-dir entries.

        Bound both to <Return> inside the entries and to <Button-1> on the
        whole window, so clicking elsewhere also saves.  *e* is the
        triggering Tk event, or None when called directly.
        """
        focus = self._table
        for entry, callback in self._info.values():
            if entry['state'] == 'disabled':
                continue
            if e is not None and e.widget is entry and e.keysym != 'Return':
                # The click landed on this already-editable entry; keep
                # focus there instead of committing.
                focus = entry
            else:
                # Lock the entry again and hand its text to the setter
                # callback (_set_url / _set_download_dir).
                entry['state'] = 'disabled'
                entry['relief'] = 'groove'
                callback(entry.get())
        focus.focus()
def _table_reprfunc(self, row, col, val):
if self._table.column_names[col].endswith('Size'):
if isinstance(val, string_types):
return ' %s' % val
elif val < 1024 ** 2:
return ' %.1f KB' % (val / 1024.0 ** 1)
elif val < 1024 ** 3:
return ' %.1f MB' % (val / 1024.0 ** 2)
else:
return ' %.1f GB' % (val / 1024.0 ** 3)
if col in (0, ''):
return str(val)
else:
return ' %s' % val
def _set_url(self, url):
if url == self._ds.url:
return
try:
self._ds.url = url
self._fill_table()
except IOError as e:
showerror('Error Setting Server Index', str(e))
self._show_info()
def _set_download_dir(self, download_dir):
if self._ds.download_dir == download_dir:
return
# check if the dir exists, and if not, ask if we should create it?
# Clear our status cache, & re-check what's installed
self._ds.download_dir = download_dir
try:
self._fill_table()
except HTTPError as e:
showerror('Error reading from server', e)
except URLError as e:
showerror('Error connecting to server', e.reason)
self._show_info()
def _show_info(self):
print('showing info', self._ds.url)
for entry, cb in self._info.values():
entry['state'] = 'normal'
entry.delete(0, 'end')
self._info['url'][0].insert(0, self._ds.url)
self._info['download_dir'][0].insert(0, self._ds.download_dir)
for entry, cb in self._info.values():
entry['state'] = 'disabled'
def _prev_tab(self, *e):
for i, tab in enumerate(self._tab_names):
if tab.lower() == self._tab and i > 0:
self._tab = self._tab_names[i - 1].lower()
try:
return self._fill_table()
except HTTPError as e:
showerror('Error reading from server', e)
except URLError as e:
showerror('Error connecting to server', e.reason)
def _next_tab(self, *e):
for i, tab in enumerate(self._tab_names):
if tab.lower() == self._tab and i < (len(self._tabs) - 1):
self._tab = self._tab_names[i + 1].lower()
try:
return self._fill_table()
except HTTPError as e:
showerror('Error reading from server', e)
except URLError as e:
showerror('Error connecting to server', e.reason)
def _select_tab(self, event):
self._tab = event.widget['text'].lower()
try:
self._fill_table()
except HTTPError as e:
showerror('Error reading from server', e)
except URLError as e:
showerror('Error connecting to server', e.reason)
_tab = 'collections'
# _tab = 'corpora'
_rows = None
    def _fill_table(self):
        """Re-populate the table with the items for the current tab,
        preserving the selection, re-sorting, and re-coloring rows."""
        selected_row = self._table.selected_row()
        self._table.clear()
        if self._tab == 'all packages':
            items = self._ds.packages()
        elif self._tab == 'corpora':
            items = self._ds.corpora()
        elif self._tab == 'models':
            items = self._ds.models()
        elif self._tab == 'collections':
            items = self._ds.collections()
        else:
            assert 0, 'bad tab value %r' % self._tab
        rows = [self._package_to_columns(item) for item in items]
        self._table.extend(rows)

        # Highlight the active tab.
        for tab, label in self._tabs.items():
            if tab == self._tab:
                label.configure(
                    foreground=self._FRONT_TAB_COLOR[0],
                    background=self._FRONT_TAB_COLOR[1],
                )
            else:
                label.configure(
                    foreground=self._BACK_TAB_COLOR[0],
                    background=self._BACK_TAB_COLOR[1],
                )

        self._table.sort_by('Identifier', order='ascending')
        self._color_table()
        self._table.select(selected_row)

        # This is a hack, because the scrollbar isn't updating its
        # position right -- I'm not sure what the underlying cause is
        # though. (This is on OS X w/ python 2.5) The length of
        # delay that's necessary seems to depend on how fast the
        # computer is. :-/
        self.top.after(150, self._table._scrollbar.set, *self._table._mlb.yview())
        self.top.after(300, self._table._scrollbar.set, *self._table._mlb.yview())
def _update_table_status(self):
for row_num in range(len(self._table)):
status = self._ds.status(self._table[row_num, 'Identifier'])
self._table[row_num, 'Status'] = status
self._color_table()
def _download(self, *e):
# If we're using threads, then delegate to the threaded
# downloader instead.
if self._use_threads:
return self._download_threaded(*e)
marked = [
self._table[row, 'Identifier']
for row in range(len(self._table))
if self._table[row, 0] != ''
]
selection = self._table.selected_row()
if not marked and selection is not None:
marked = [self._table[selection, 'Identifier']]
download_iter = self._ds.incr_download(marked, self._ds.download_dir)
self._log_indent = 0
self._download_cb(download_iter, marked)
    # Milliseconds between polls of the (non-threaded) download iterator.
    _DL_DELAY = 10

    def _download_cb(self, download_iter, ids):
        """Consume one message from *download_iter*, update the GUI, and
        reschedule itself until the iterator is exhausted (non-threaded
        download path)."""
        try:
            msg = next(download_iter)
        except StopIteration:
            # self._fill_table(sort=False)
            self._update_table_status()
            afterid = self.top.after(10, self._show_progress, 0)
            self._afterid['_download_cb'] = afterid
            return

        def show(s):
            # Mirror every status message to the label and the log.
            self._progresslabel['text'] = s
            self._log(s)

        if isinstance(msg, ProgressMessage):
            self._show_progress(msg.progress)
        elif isinstance(msg, ErrorMessage):
            show(msg.message)
            if msg.package is not None:
                self._select(msg.package.id)
            self._show_progress(None)
            return  # halt progress.
        elif isinstance(msg, StartCollectionMessage):
            show('Downloading collection %s' % msg.collection.id)
            self._log_indent += 1
        elif isinstance(msg, StartPackageMessage):
            show('Downloading package %s' % msg.package.id)
        elif isinstance(msg, UpToDateMessage):
            show('Package %s is up-to-date!' % msg.package.id)
        # elif isinstance(msg, StaleMessage):
        #     show('Package %s is out-of-date or corrupt' % msg.package.id)
        elif isinstance(msg, FinishDownloadMessage):
            show('Finished downloading %r.' % msg.package.id)
        elif isinstance(msg, StartUnzipMessage):
            show('Unzipping %s' % msg.package.filename)
        elif isinstance(msg, FinishCollectionMessage):
            self._log_indent -= 1
            show('Finished downloading collection %r.' % msg.collection.id)
            self._clear_mark(msg.collection.id)
        elif isinstance(msg, FinishPackageMessage):
            self._clear_mark(msg.package.id)

        afterid = self.top.after(self._DL_DELAY, self._download_cb, download_iter, ids)
        self._afterid['_download_cb'] = afterid
def _select(self, id):
for row in range(len(self._table)):
if self._table[row, 'Identifier'] == id:
self._table.select(row)
return
def _color_table(self):
# Color rows according to status.
for row in range(len(self._table)):
bg, sbg = self._ROW_COLOR[self._table[row, 'Status']]
fg, sfg = ('black', 'white')
self._table.rowconfig(
row,
foreground=fg,
selectforeground=sfg,
background=bg,
selectbackground=sbg,
)
# Color the marked column
self._table.itemconfigure(
row, 0, foreground=self._MARK_COLOR[0], background=self._MARK_COLOR[1]
)
def _clear_mark(self, id):
for row in range(len(self._table)):
if self._table[row, 'Identifier'] == id:
self._table[row, 0] = ''
    def _mark_all(self, *e):
        """Put an 'X' mark on every row (Ctrl-A binding)."""
        for row in range(len(self._table)):
            self._table[row, 0] = 'X'
def _table_mark(self, *e):
selection = self._table.selected_row()
if selection >= 0:
if self._table[selection][0] != '':
self._table[selection, 0] = ''
else:
self._table[selection, 0] = 'X'
self._table.select(delta=1)
def _show_log(self):
text = '\n'.join(self._log_messages)
ShowText(self.top, 'NLTK Downloader Log', text)
def _package_to_columns(self, pkg):
"""
Given a package, return a list of values describing that
package, one for each column in ``self.COLUMNS``.
"""
row = []
for column_index, column_name in enumerate(self.COLUMNS):
if column_index == 0: # Mark:
row.append('')
elif column_name == 'Identifier':
row.append(pkg.id)
elif column_name == 'Status':
row.append(self._ds.status(pkg))
else:
attr = column_name.lower().replace(' ', '_')
row.append(getattr(pkg, attr, 'n/a'))
return row
# /////////////////////////////////////////////////////////////////
# External Interface
# /////////////////////////////////////////////////////////////////
def destroy(self, *e):
if self._destroyed:
return
self.top.destroy()
self._destroyed = True
    def _destroy(self, *e):
        """<Destroy> handler: cancel pending after-callbacks and abort any
        running download before the widgets disappear."""
        if self.top is not None:
            for afterid in self._afterid.values():
                self.top.after_cancel(afterid)

            # Abort any download in progress.
            if self._downloading and self._use_threads:
                self._abort_download()

            # Make sure the garbage collector destroys these now;
            # otherwise, they may get destroyed when we're not in the main
            # thread, which would make Tkinter unhappy.
            self._column_vars.clear()
    def mainloop(self, *args, **kwargs):
        """Run the Tk main loop; blocks until the window is closed."""
        self.top.mainloop(*args, **kwargs)
# /////////////////////////////////////////////////////////////////
# HELP
# /////////////////////////////////////////////////////////////////
HELP = textwrap.dedent(
"""\
This tool can be used to download a variety of corpora and models
that can be used with NLTK. Each corpus or model is distributed
in a single zip file, known as a \"package file.\" You can
download packages individually, or you can download pre-defined
collections of packages.
When you download a package, it will be saved to the \"download
directory.\" A default download directory is chosen when you run
the downloader; but you may also select a different download
directory. On Windows, the default download directory is
\"package.\"
The NLTK downloader can be used to download a variety of corpora,
models, and other data packages.
Keyboard shortcuts::
[return]\t Download
[up]\t Select previous package
[down]\t Select next package
[left]\t Select previous tab
[right]\t Select next tab
"""
)
def help(self, *e):
# The default font's not very legible; try using 'fixed' instead.
try:
ShowText(
self.top,
'Help: NLTK Dowloader',
self.HELP.strip(),
width=75,
font='fixed',
)
except:
ShowText(self.top, 'Help: NLTK Downloader', self.HELP.strip(), width=75)
    def about(self, *e):
        """Show the About dialog; falls back to ShowText if the six
        tkinter_messagebox shim is unavailable."""
        ABOUT = "NLTK Downloader\n" + "Written by Edward Loper"
        TITLE = 'About: NLTK Downloader'
        try:
            from six.moves.tkinter_messagebox import Message

            Message(message=ABOUT, title=TITLE).show()
        except ImportError:
            ShowText(self.top, TITLE, ABOUT)
# /////////////////////////////////////////////////////////////////
# Progress Bar
# /////////////////////////////////////////////////////////////////
    # Width in pixels of each diagonal stripe in the 'busy' gradient.
    _gradient_width = 5

    def _init_progressbar(self):
        """Draw the progress canvas: a striped 'busy' gradient (hidden by
        default) plus the 'redbox' rectangle that shows percent done."""
        c = self._progressbar
        width, height = int(c['width']), int(c['height'])
        # Diagonal stripes, twice the canvas width so they can scroll.
        for i in range(0, (int(c['width']) * 2) // self._gradient_width):
            c.create_line(
                i * self._gradient_width + 20,
                -20,
                i * self._gradient_width - height - 20,
                height + 20,
                width=self._gradient_width,
                fill='#%02x0000' % (80 + abs(i % 6 - 3) * 12),
            )
        c.addtag_all('gradient')
        c.itemconfig('gradient', state='hidden')

        # This is used to display progress
        c.addtag_withtag(
            'redbox', c.create_rectangle(0, 0, 0, 0, fill=self._PROGRESS_COLOR[0])
        )
def _show_progress(self, percent):
c = self._progressbar
if percent is None:
c.coords('redbox', 0, 0, 0, 0)
c.itemconfig('gradient', state='hidden')
else:
width, height = int(c['width']), int(c['height'])
x = percent * int(width) // 100 + 1
c.coords('redbox', 0, 0, x, height + 1)
    def _progress_alive(self):
        """Animate the busy gradient while a download runs; reschedules
        itself every 200 ms via Tk's after()."""
        c = self._progressbar
        if not self._downloading:
            c.itemconfig('gradient', state='hidden')
        else:
            c.itemconfig('gradient', state='normal')
            x1, y1, x2, y2 = c.bbox('gradient')
            # Scroll the stripes left; jump back once they drift too far.
            if x1 <= -100:
                c.move('gradient', (self._gradient_width * 6) - 4, 0)
            else:
                c.move('gradient', -4, 0)
            afterid = self.top.after(200, self._progress_alive)
            self._afterid['_progress_alive'] = afterid
# /////////////////////////////////////////////////////////////////
# Threaded downloader
# /////////////////////////////////////////////////////////////////
    def _download_threaded(self, *e):
        """Start a download on a background thread (or, if one is already
        running, request that it abort instead)."""
        # If the user tries to start a new download while we're already
        # downloading something, then abort the current download instead.
        if self._downloading:
            self._abort_download()
            return

        # Change the 'download' button to an 'abort' button.
        self._download_button['text'] = 'Cancel'

        marked = [
            self._table[row, 'Identifier']
            for row in range(len(self._table))
            if self._table[row, 0] != ''
        ]
        selection = self._table.selected_row()
        if not marked and selection is not None:
            marked = [self._table[selection, 'Identifier']]

        # Create a new data server object for the download operation,
        # just in case the user modifies our data server during the
        # download (e.g., clicking 'refresh' or editing the index url).
        ds = Downloader(self._ds.url, self._ds.download_dir)

        # Start downloading in a separate thread.
        assert self._download_msg_queue == []
        assert self._download_abort_queue == []
        self._DownloadThread(
            ds,
            marked,
            self._download_lock,
            self._download_msg_queue,
            self._download_abort_queue,
        ).start()

        # Monitor the download message queue & display its progress.
        self._log_indent = 0
        self._downloading = True
        self._monitor_message_queue()

        # Display an indication that we're still alive and well by
        # cycling the progress bar.
        self._progress_alive()
def _abort_download(self):
if self._downloading:
self._download_lock.acquire()
self._download_abort_queue.append('abort')
self._download_lock.release()
    class _DownloadThread(threading.Thread):
        """Background thread that runs ``incr_download()`` and forwards
        its messages to the GUI through a lock-protected queue."""

        def __init__(self, data_server, items, lock, message_queue, abort):
            self.data_server = data_server
            self.items = items
            self.lock = lock  # guards message_queue and abort
            self.message_queue = message_queue
            self.abort = abort  # non-empty list means 'abort requested'
            threading.Thread.__init__(self)

        def run(self):
            # Forward every download message to the GUI under the lock.
            for msg in self.data_server.incr_download(self.items):
                self.lock.acquire()
                self.message_queue.append(msg)
                # Check if we've been told to kill ourselves:
                if self.abort:
                    self.message_queue.append('aborted')
                    self.lock.release()
                    return
                self.lock.release()
            self.lock.acquire()
            self.message_queue.append('finished')
            self.lock.release()
_MONITOR_QUEUE_DELAY = 100
def _monitor_message_queue(self):
def show(s):
self._progresslabel['text'] = s
self._log(s)
# Try to acquire the lock; if it's busy, then just try again later.
if not self._download_lock.acquire():
return
for msg in self._download_msg_queue:
# Done downloading?
if msg == 'finished' or msg == 'aborted':
# self._fill_table(sort=False)
self._update_table_status()
self._downloading = False
self._download_button['text'] = 'Download'
del self._download_msg_queue[:]
del self._download_abort_queue[:]
self._download_lock.release()
if msg == 'aborted':
show('Download aborted!')
self._show_progress(None)
else:
afterid = self.top.after(100, self._show_progress, None)
self._afterid['_monitor_message_queue'] = afterid
return
# All other messages
elif isinstance(msg, ProgressMessage):
self._show_progress(msg.progress)
elif isinstance(msg, ErrorMessage):
show(msg.message)
if msg.package is not None:
self._select(msg.package.id)
self._show_progress(None)
self._downloading = False
return # halt progress.
elif isinstance(msg, StartCollectionMessage):
show('Downloading collection %r' % msg.collection.id)
self._log_indent += 1
elif isinstance(msg, StartPackageMessage):
self._ds.clear_status_cache(msg.package.id)
show('Downloading package %r' % msg.package.id)
elif isinstance(msg, UpToDateMessage):
show('Package %s is up-to-date!' % msg.package.id)
# elif isinstance(msg, StaleMessage):
# show('Package %s is out-of-date or corrupt; updating it' %
# msg.package.id)
elif isinstance(msg, FinishDownloadMessage):
show('Finished downloading %r.' % msg.package.id)
elif isinstance(msg, StartUnzipMessage):
show('Unzipping %s' % msg.package.filename)
elif isinstance(msg, FinishUnzipMessage):
show('Finished installing %s' % msg.package.id)
elif isinstance(msg, FinishCollectionMessage):
self._log_indent -= 1
show('Finished downloading collection %r.' % msg.collection.id)
self._clear_mark(msg.collection.id)
elif isinstance(msg, FinishPackageMessage):
self._update_table_status()
self._clear_mark(msg.package.id)
# Let the user know when we're aborting a download (but
# waiting for a good point to abort it, so we don't end up
# with a partially unzipped package or anything like that).
if self._download_abort_queue:
self._progresslabel['text'] = 'Aborting download...'
# Clear the message queue and then release the lock
del self._download_msg_queue[:]
self._download_lock.release()
# Check the queue again after MONITOR_QUEUE_DELAY msec.
afterid = self.top.after(self._MONITOR_QUEUE_DELAY, self._monitor_message_queue)
self._afterid['_monitor_message_queue'] = afterid
######################################################################
# Helper Functions
######################################################################
# [xx] It may make sense to move these to nltk.internals.
def md5_hexdigest(file):
    """
    Calculate and return the MD5 checksum for a given file.
    ``file`` may either be a filename or an open stream.
    """
    if not isinstance(file, string_types):
        # Already an open stream.
        return _md5_hexdigest(file)
    with open(file, 'rb') as infile:
        return _md5_hexdigest(infile)
def _md5_hexdigest(fp):
    """Helper for ``md5_hexdigest``: digest an open binary stream in
    16k-byte blocks and return the hex digest string."""
    digest = md5()
    while True:
        chunk = fp.read(1024 * 16)  # 16k blocks
        if not chunk:
            break
        digest.update(chunk)
    return digest.hexdigest()
# change this to periodically yield progress messages?
# [xx] get rid of topdir parameter -- we should be checking
# this when we build the index, anyway.
def unzip(filename, root, verbose=True):
    """
    Extract the contents of the zip file ``filename`` into the
    directory ``root``.
    """
    for message in _unzip_iter(filename, root, verbose):
        if not isinstance(message, ErrorMessage):
            continue
        # Surface extraction errors to the caller as exceptions.
        raise Exception(message)
def _unzip_iter(filename, root, verbose=True):
    """
    Incrementally extract zip ``filename`` into directory ``root``,
    yielding ``ErrorMessage`` objects on failure (instead of raising)
    and printing progress dots when *verbose*.

    Security fix: zip member names may contain absolute paths or ``..``
    components ("zip slip" path traversal, CVE-2019-14751 class).  Every
    member's resolved destination is now checked to stay inside ``root``
    before anything is created or written.
    """
    if verbose:
        sys.stdout.write('Unzipping %s' % os.path.split(filename)[1])
        sys.stdout.flush()

    try:
        zf = zipfile.ZipFile(filename)
    except zipfile.error as e:
        yield ErrorMessage(filename, 'Error with downloaded zip file')
        return
    except Exception as e:
        yield ErrorMessage(filename, e)
        return

    namelist = zf.namelist()

    # -- Security check: refuse any entry that would escape ``root``. --
    root_real = os.path.realpath(os.path.abspath(root))
    for member in namelist:
        target = os.path.realpath(
            os.path.abspath(os.path.join(root_real, *member.split('/')))
        )
        if target != root_real and not target.startswith(root_real + os.sep):
            yield ErrorMessage(filename, 'Unsafe entry path in zip file: %r' % member)
            return

    # Get lists of directories & files
    dirlist = set()
    for x in namelist:
        if x.endswith('/'):
            dirlist.add(x)
        elif '/' in x:
            # Only a file nested in a subdirectory implies a directory;
            # a bare top-level filename must not be created as one.
            dirlist.add(x.rsplit('/', 1)[0] + '/')
    filelist = [x for x in namelist if not x.endswith('/')]

    # Create the target directory if it doesn't exist
    if not os.path.exists(root):
        os.mkdir(root)

    # Create the directory structure, outermost directories first.
    for dirname in sorted(dirlist):
        pieces = dirname[:-1].split('/')
        for i in range(len(pieces)):
            dirpath = os.path.join(root, *pieces[: i + 1])
            if not os.path.exists(dirpath):
                os.mkdir(dirpath)

    # Extract files.
    for i, filename in enumerate(filelist):
        filepath = os.path.join(root, *filename.split('/'))
        try:
            with open(filepath, 'wb') as dstfile, zf.open(filename) as srcfile:
                shutil.copyfileobj(srcfile, dstfile)
        except Exception as e:
            yield ErrorMessage(filename, e)
            return
        # Emit roughly ten progress dots across the whole extraction.
        if verbose and (i * 10 / len(filelist) > (i - 1) * 10 / len(filelist)):
            sys.stdout.write('.')
            sys.stdout.flush()
    if verbose:
        print()
######################################################################
# Index Builder
######################################################################
# This may move to a different file sometime.
def build_index(root, base_url):
    """
    Create a new data.xml index file, by combining the xml description
    files for various packages and collections. ``root`` should be the
    path to a directory containing the package xml and zip files; and
    the collection xml files. The ``root`` directory is expected to
    have the following subdirectories::
    root/
    packages/ .................. subdirectory for packages
    corpora/ ................. zip & xml files for corpora
    grammars/ ................ zip & xml files for grammars
    taggers/ ................. zip & xml files for taggers
    tokenizers/ .............. zip & xml files for tokenizers
    etc.
    collections/ ............... xml files for collections
    For each package, there should be two files: ``package.zip``
    (where *package* is the package name)
    which contains the package itself as a compressed zip file; and
    ``package.xml``, which is an xml description of the package. The
    zipfile ``package.zip`` should expand to a single subdirectory
    named ``package/``. The base filename ``package`` must match
    the identifier given in the package's xml file.
    For each collection, there should be a single file ``collection.zip``
    describing the collection, where *collection* is the name of the collection.
    All identifiers (for both packages and collections) must be unique.
    """
    # Find all packages.
    packages = []
    for pkg_xml, zf, subdir in _find_packages(os.path.join(root, 'packages')):
        zipstat = os.stat(zf.filename)
        url = '%s/%s/%s' % (base_url, subdir, os.path.split(zf.filename)[1])
        unzipped_size = sum(zf_info.file_size for zf_info in zf.infolist())

        # Fill in several fields of the package xml with calculated values.
        pkg_xml.set('unzipped_size', '%s' % unzipped_size)
        pkg_xml.set('size', '%s' % zipstat.st_size)
        pkg_xml.set('checksum', '%s' % md5_hexdigest(zf.filename))
        pkg_xml.set('subdir', subdir)
        # pkg_xml.set('svn_revision', _svn_revision(zf.filename))
        if not pkg_xml.get('url'):
            pkg_xml.set('url', url)

        # Record the package.
        packages.append(pkg_xml)

    # Find all collections
    collections = list(_find_collections(os.path.join(root, 'collections')))

    # Check that all UIDs are unique
    uids = set()
    for item in packages + collections:
        if item.get('id') in uids:
            raise ValueError('Duplicate UID: %s' % item.get('id'))
        uids.add(item.get('id'))

    # Put it all together: <nltk_data><packages/><collections/></nltk_data>
    top_elt = ElementTree.Element('nltk_data')
    top_elt.append(ElementTree.Element('packages'))
    for package in packages:
        top_elt[0].append(package)
    top_elt.append(ElementTree.Element('collections'))
    for collection in collections:
        top_elt[1].append(collection)

    _indent_xml(top_elt)
    return top_elt
def _indent_xml(xml, prefix=''):
"""
Helper for ``build_index()``: Given an XML ``ElementTree``, modify it
(and its descendents) ``text`` and ``tail`` attributes to generate
an indented tree, where each nested element is indented by 2
spaces with respect to its parent.
"""
if len(xml) > 0:
xml.text = (xml.text or '').strip() + '\n' + prefix + ' '
for child in xml:
_indent_xml(child, prefix + ' ')
for child in xml[:-1]:
child.tail = (child.tail or '').strip() + '\n' + prefix + ' '
xml[-1].tail = (xml[-1].tail or '').strip() + '\n' + prefix
def _check_package(pkg_xml, zipfilename, zf):
"""
Helper for ``build_index()``: Perform some checks to make sure that
the given package is consistent.
"""
# The filename must patch the id given in the XML file.
uid = os.path.splitext(os.path.split(zipfilename)[1])[0]
if pkg_xml.get('id') != uid:
raise ValueError(
'package identifier mismatch (%s vs %s)' % (pkg_xml.get('id'), uid)
)
# Zip file must expand to a subdir whose name matches uid.
if sum((name != uid and not name.startswith(uid + '/')) for name in zf.namelist()):
raise ValueError(
'Zipfile %s.zip does not expand to a single '
'subdirectory %s/' % (uid, uid)
)
# update for git?
def _svn_revision(filename):
    """
    Helper for ``build_index()``: Calculate the subversion revision
    number for a given file (by using ``subprocess`` to run ``svn``).

    :raises ValueError: if ``svn`` exits non-zero, writes to stderr,
        or produces no output.
    """
    p = subprocess.Popen(
        ['svn', 'status', '-v', filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        # Decode the streams to str: without this, ``stderr`` is bytes on
        # Python 3 and ``textwrap.fill(stderr)`` below raises TypeError.
        universal_newlines=True,
    )
    (stdout, stderr) = p.communicate()
    if p.returncode != 0 or stderr or not stdout:
        raise ValueError(
            'Error determining svn_revision for %s: %s'
            % (os.path.split(filename)[1], textwrap.fill(stderr))
        )
    # ``svn status -v`` columns: status, working rev, last-changed rev, ...
    return stdout.split()[2]
def _find_collections(root):
"""
Helper for ``build_index()``: Yield a list of ElementTree.Element
objects, each holding the xml for a single package collection.
"""
packages = []
for dirname, subdirs, files in os.walk(root):
for filename in files:
if filename.endswith('.xml'):
xmlfile = os.path.join(dirname, filename)
yield ElementTree.parse(xmlfile).getroot()
def _find_packages(root):
    """
    Helper for ``build_index()``: Yield a list of tuples
    ``(pkg_xml, zf, subdir)``, where:

    - ``pkg_xml`` is an ``ElementTree.Element`` holding the xml for a
      package
    - ``zf`` is a ``zipfile.ZipFile`` for the package's contents.
    - ``subdir`` is the subdirectory (relative to ``root``) where
      the package was found (e.g. 'corpora' or 'grammars').

    :raises ValueError: if a zip or xml file cannot be read, if a
        package id does not match its filename, or if a zipfile does
        not expand to a single ``uid/`` subdirectory.
    """
    from nltk.corpus.reader.util import _path_from

    # Find all packages.
    packages = []
    for dirname, subdirs, files in os.walk(root):
        # Path of this directory relative to ``root``, joined with '/'
        # so it is usable in download URLs.
        relpath = '/'.join(_path_from(root, dirname))
        for filename in files:
            # Every *.xml file is expected to have a sibling *.zip.
            if filename.endswith('.xml'):
                xmlfilename = os.path.join(dirname, filename)
                zipfilename = xmlfilename[:-4] + '.zip'
                try:
                    zf = zipfile.ZipFile(zipfilename)
                except Exception as e:
                    raise ValueError('Error reading file %r!\n%s' % (zipfilename, e))
                try:
                    pkg_xml = ElementTree.parse(xmlfilename).getroot()
                except Exception as e:
                    raise ValueError('Error reading file %r!\n%s' % (xmlfilename, e))

                # Check that the UID matches the filename
                uid = os.path.split(xmlfilename[:-4])[1]
                if pkg_xml.get('id') != uid:
                    raise ValueError(
                        'package identifier mismatch (%s '
                        'vs %s)' % (pkg_xml.get('id'), uid)
                    )

                # Check that the zipfile expands to a subdir whose
                # name matches the uid.
                if sum(
                    (name != uid and not name.startswith(uid + '/'))
                    for name in zf.namelist()
                ):
                    raise ValueError(
                        'Zipfile %s.zip does not expand to a '
                        'single subdirectory %s/' % (uid, uid)
                    )

                yield pkg_xml, zf, relpath

        # Don't recurse into svn subdirectories:
        try:
            subdirs.remove('.svn')
        except ValueError:
            pass
######################################################################
# Main:
######################################################################

# There should be a command-line interface

# Aliases
# Module-level singleton so ``nltk.download(...)`` works without the
# caller constructing a Downloader.
_downloader = Downloader()
download = _downloader.download


def download_shell():
    """Run the interactive text-mode downloader."""
    DownloaderShell(_downloader).run()


def download_gui():
    """Run the Tkinter downloader GUI (blocks in its mainloop)."""
    DownloaderGUI(_downloader).mainloop()


def update():
    """Refresh the shared downloader's package index."""
    _downloader.update()
if __name__ == '__main__':
    # Command-line interface: ``python downloader.py [options] [pkg_id ...]``
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option(
        "-d",
        "--dir",
        dest="dir",
        help="download package to directory DIR",
        metavar="DIR",
    )
    parser.add_option(
        "-q",
        "--quiet",
        dest="quiet",
        action="store_true",
        default=False,
        help="work quietly",
    )
    parser.add_option(
        "-f",
        "--force",
        dest="force",
        action="store_true",
        default=False,
        help="download even if already installed",
    )
    parser.add_option(
        "-e",
        "--exit-on-error",
        dest="halt_on_error",
        action="store_true",
        default=False,
        help="exit if an error occurs",
    )
    parser.add_option(
        "-u",
        "--url",
        dest="server_index_url",
        default=os.environ.get('NLTK_DOWNLOAD_URL'),
        help="download server index url",
    )
    (options, args) = parser.parse_args()

    downloader = Downloader(server_index_url=options.server_index_url)

    if args:
        # Download each requested package in turn.
        for pkg_id in args:
            rv = downloader.download(
                info_or_id=pkg_id,
                download_dir=options.dir,
                quiet=options.quiet,
                force=options.force,
                halt_on_error=options.halt_on_error,
            )
            # ``is False`` (not ``== False``, PEP 8 E712): abort only on an
            # explicit failure return, not on any falsy value such as None.
            if rv is False and options.halt_on_error:
                break
    else:
        # No package ids given: open the interactive downloader instead.
        downloader.download(
            download_dir=options.dir,
            quiet=options.quiet,
            force=options.force,
            halt_on_error=options.halt_on_error,
        )
| ./CrossVul/dataset_final_sorted/CWE-22/py/bad_994_0 |
crossvul-python_data_good_4110_1 | """ Generate modern Python clients from OpenAPI """
from __future__ import annotations
import shutil
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict, Optional, Sequence, Union
import httpcore
import httpx
import yaml
from jinja2 import Environment, PackageLoader
from openapi_python_client import utils
from .parser import GeneratorData, import_string_from_reference
from .parser.errors import GeneratorError
# ``importlib.metadata`` is stdlib from Python 3.8 onwards; on 3.7 we fall
# back to the ``importlib_metadata`` backport.  Compare the full version
# tuple rather than only ``minor`` (the old ``minor == 7`` test would also
# match e.g. a hypothetical 4.7, and would wrongly pick stdlib on 3.7.x
# only by accident of the comparison).
if sys.version_info < (3, 8):
    from importlib_metadata import version
else:
    from importlib.metadata import version  # type: ignore

__version__ = version(__package__)
def _get_project_for_url_or_path(url: Optional[str], path: Optional[Path]) -> Union[Project, GeneratorError]:
    """Load, parse, and wrap an OpenAPI document into a Project.

    Returns a GeneratorError instead of raising when loading or parsing fails.
    """
    document = _get_document(url=url, path=path)
    if isinstance(document, GeneratorError):
        return document
    parsed = GeneratorData.from_dict(document)
    if isinstance(parsed, GeneratorError):
        return parsed
    return Project(openapi=parsed)
def create_new_client(*, url: Optional[str], path: Optional[Path]) -> Sequence[GeneratorError]:
    """
    Generate the client library

    Returns:
        A list containing any errors encountered when generating.
    """
    result = _get_project_for_url_or_path(url=url, path=path)
    if isinstance(result, GeneratorError):
        return [result]
    return result.build()
def update_existing_client(*, url: Optional[str], path: Optional[Path]) -> Sequence[GeneratorError]:
    """
    Update an existing client library

    Returns:
        A list containing any errors encountered when generating.
    """
    result = _get_project_for_url_or_path(url=url, path=path)
    if isinstance(result, GeneratorError):
        return [result]
    return result.update()
def _get_document(*, url: Optional[str], path: Optional[Path]) -> Union[Dict[str, Any], GeneratorError]:
    """Load the OpenAPI document as a dict from exactly one of *url* or *path*.

    Returns a GeneratorError (never raises for network/YAML problems) when
    the source is ambiguous, unreachable, or not valid YAML.
    """
    if url is not None and path is not None:
        return GeneratorError(header="Provide URL or Path, not both.")

    yaml_bytes: bytes
    if url is not None:
        try:
            yaml_bytes = httpx.get(url).content
        except (httpx.HTTPError, httpcore.NetworkError):
            return GeneratorError(header="Could not get OpenAPI document from provided URL")
    elif path is not None:
        yaml_bytes = path.read_bytes()
    else:
        return GeneratorError(header="No URL or Path provided")

    try:
        return yaml.safe_load(yaml_bytes)
    except yaml.YAMLError:
        return GeneratorError(header="Invalid YAML from provided source")
class Project:
    """Renders a complete Python client package from parsed OpenAPI data.

    ``build()`` creates a brand-new project directory; ``update()``
    regenerates the package inside an existing one.
    """

    TEMPLATE_FILTERS = {"snakecase": utils.snake_case, "kebabcase": utils.kebab_case}
    # Class-level overrides: set before instantiation to force the
    # generated project/package names.
    project_name_override: Optional[str] = None
    package_name_override: Optional[str] = None

    def __init__(self, *, openapi: GeneratorData) -> None:
        self.openapi: GeneratorData = openapi
        self.env: Environment = Environment(loader=PackageLoader(__package__), trim_blocks=True, lstrip_blocks=True)

        # e.g. title "My API" -> project "my-api-client", package "my_api_client"
        self.project_name: str = self.project_name_override or f"{utils.kebab_case(openapi.title).lower()}-client"
        self.project_dir: Path = Path.cwd() / self.project_name

        self.package_name: str = self.package_name_override or self.project_name.replace("-", "_")
        self.package_dir: Path = self.project_dir / self.package_name
        self.package_description: str = f"A client library for accessing {self.openapi.title}"
        self.version: str = openapi.version

        self.env.filters.update(self.TEMPLATE_FILTERS)

    def build(self) -> Sequence[GeneratorError]:
        """ Create the project from templates """
        print(f"Generating {self.project_name}")
        try:
            self.project_dir.mkdir()
        except FileExistsError:
            return [GeneratorError(detail="Directory already exists. Delete it or use the update command.")]
        self._create_package()
        self._build_metadata()
        self._build_models()
        self._build_api()
        self._reformat()
        return self._get_errors()

    def update(self) -> Sequence[GeneratorError]:
        """ Update an existing project """
        if not self.package_dir.is_dir():
            raise FileNotFoundError()
        print(f"Updating {self.project_name}")
        # Regenerate the package from scratch; project-level metadata files
        # (pyproject.toml, README, .gitignore) are left as-is.
        shutil.rmtree(self.package_dir)
        self._create_package()
        self._build_models()
        self._build_api()
        self._reformat()
        return self._get_errors()

    def _reformat(self) -> None:
        """Run isort and black over the generated project; output is discarded."""
        subprocess.run(
            "isort .", cwd=self.project_dir, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        )
        subprocess.run("black .", cwd=self.project_dir, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def _get_errors(self) -> Sequence[GeneratorError]:
        """Collect parse errors from every endpoint collection and the schemas."""
        errors = []
        for collection in self.openapi.endpoint_collections_by_tag.values():
            errors.extend(collection.parse_errors)
        errors.extend(self.openapi.schemas.errors)
        return errors

    def _create_package(self) -> None:
        """Create the package dir with its ``__init__.py`` and ``py.typed`` marker."""
        self.package_dir.mkdir()
        # Package __init__.py
        package_init = self.package_dir / "__init__.py"
        package_init_template = self.env.get_template("package_init.pyi")
        package_init.write_text(package_init_template.render(description=self.package_description))

        pytyped = self.package_dir / "py.typed"
        pytyped.write_text("# Marker file for PEP 561")

    def _build_metadata(self) -> None:
        """Render project-level files: pyproject.toml, README.md, .gitignore."""
        # Create a pyproject.toml file
        pyproject_template = self.env.get_template("pyproject.toml")
        pyproject_path = self.project_dir / "pyproject.toml"
        pyproject_path.write_text(
            pyproject_template.render(
                project_name=self.project_name,
                package_name=self.package_name,
                version=self.version,
                description=self.package_description,
            )
        )

        # README.md
        readme = self.project_dir / "README.md"
        readme_template = self.env.get_template("README.md")
        readme.write_text(
            readme_template.render(
                project_name=self.project_name, description=self.package_description, package_name=self.package_name
            )
        )

        # .gitignore
        git_ignore_path = self.project_dir / ".gitignore"
        git_ignore_template = self.env.get_template(".gitignore")
        git_ignore_path.write_text(git_ignore_template.render())

    def _build_models(self) -> None:
        """Render one module per model/enum plus the models package __init__."""
        # Generate models
        models_dir = self.package_dir / "models"
        models_dir.mkdir()
        models_init = models_dir / "__init__.py"
        imports = []

        types_template = self.env.get_template("types.py")
        types_path = models_dir / "types.py"
        types_path.write_text(types_template.render())

        model_template = self.env.get_template("model.pyi")
        for model in self.openapi.schemas.models.values():
            module_path = models_dir / f"{model.reference.module_name}.py"
            module_path.write_text(model_template.render(model=model))
            imports.append(import_string_from_reference(model.reference))

        # Generate enums
        enum_template = self.env.get_template("enum.pyi")
        for enum in self.openapi.enums.values():
            module_path = models_dir / f"{enum.reference.module_name}.py"
            module_path.write_text(enum_template.render(enum=enum))
            imports.append(import_string_from_reference(enum.reference))

        models_init_template = self.env.get_template("models_init.pyi")
        models_init.write_text(models_init_template.render(imports=imports))

    def _build_api(self) -> None:
        """Render the client module plus sync and async endpoint modules per tag."""
        # Generate Client
        client_path = self.package_dir / "client.py"
        client_template = self.env.get_template("client.pyi")
        client_path.write_text(client_template.render())

        # Generate endpoints
        api_dir = self.package_dir / "api"
        api_dir.mkdir()
        api_init = api_dir / "__init__.py"
        api_init.write_text('""" Contains synchronous methods for accessing the API """')

        async_api_dir = self.package_dir / "async_api"
        async_api_dir.mkdir()
        async_api_init = async_api_dir / "__init__.py"
        async_api_init.write_text('""" Contains async methods for accessing the API """')

        api_errors = self.package_dir / "errors.py"
        errors_template = self.env.get_template("errors.pyi")
        api_errors.write_text(errors_template.render())

        endpoint_template = self.env.get_template("endpoint_module.pyi")
        async_endpoint_template = self.env.get_template("async_endpoint_module.pyi")
        for tag, collection in self.openapi.endpoint_collections_by_tag.items():
            tag = utils.snake_case(tag)
            module_path = api_dir / f"{tag}.py"
            module_path.write_text(endpoint_template.render(collection=collection))
            async_module_path = async_api_dir / f"{tag}.py"
            async_module_path.write_text(async_endpoint_template.render(collection=collection))
| ./CrossVul/dataset_final_sorted/CWE-22/py/good_4110_1 |
crossvul-python_data_bad_4885_0 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import types
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import izip, with_metaclass, text_type
#: the types we support for context functions
_context_function_types = (types.FunctionType, types.MethodType)


# Mapping of binary operator symbols to callables, used for compile-time
# constant folding (see ``BinExpr.as_const``).
_binop_to_func = {
    '*': operator.mul,
    '/': operator.truediv,
    '//': operator.floordiv,
    '**': operator.pow,
    '%': operator.mod,
    '+': operator.add,
    '-': operator.sub
}

# Unary operator symbols to callables (``UnaryExpr.as_const``).
_uaop_to_func = {
    'not': operator.not_,
    '+': operator.pos,
    '-': operator.neg
}

# Comparison operator names (as produced by the parser) to callables
# (``Compare.as_const``).
_cmpop_to_func = {
    'eq': operator.eq,
    'ne': operator.ne,
    'gt': operator.gt,
    'gteq': operator.ge,
    'lt': operator.lt,
    'lteq': operator.le,
    'in': lambda a, b: a in b,
    'notin': lambda a, b: a not in b
}
class Impossible(Exception):
    """Raised when a node cannot perform a requested action (typically
    when ``as_const`` cannot fold an expression at compile time)."""
class NodeType(type):
    """A metaclass for nodes that handles the field and attribute
    inheritance. fields and attributes from the parent class are
    automatically forwarded to the child."""

    def __new__(cls, name, bases, d):
        for attr in 'fields', 'attributes':
            storage = []
            # Inherited entries first, then the subclass's own additions.
            storage.extend(getattr(bases[0], attr, ()))
            storage.extend(d.get(attr, ()))
            assert len(bases) == 1, 'multiple inheritance not allowed'
            assert len(storage) == len(set(storage)), 'layout conflict'
            d[attr] = tuple(storage)
        # Nodes are concrete unless they explicitly declare ``abstract``.
        d.setdefault('abstract', False)
        return type.__new__(cls, name, bases, d)
class EvalContext(object):
    """Holds evaluation time information. Custom attributes can be attached
    to it in extensions.
    """

    def __init__(self, environment, template_name=None):
        self.environment = environment
        # ``autoescape`` may be a per-template callable on the environment.
        autoescape = environment.autoescape
        if callable(autoescape):
            autoescape = autoescape(template_name)
        self.autoescape = autoescape
        self.volatile = False

    def save(self):
        """Snapshot the current state as a plain dict."""
        return dict(self.__dict__)

    def revert(self, old):
        """Restore a state previously returned by :meth:`save`."""
        self.__dict__.clear()
        self.__dict__.update(old)
def get_eval_context(node, ctx):
    """Return *ctx* if given, else derive a fresh EvalContext from the
    node's attached environment (raising if there is none)."""
    if ctx is not None:
        return ctx
    if node.environment is None:
        raise RuntimeError('if no eval context is passed, the '
                           'node must have an attached '
                           'environment.')
    return EvalContext(node.environment)
class Node(with_metaclass(NodeType, object)):
    """Baseclass for all Jinja2 nodes. There are a number of nodes available
    of different types. There are four major types:

    - :class:`Stmt`: statements
    - :class:`Expr`: expressions
    - :class:`Helper`: helper nodes
    - :class:`Template`: the outermost wrapper node

    All nodes have fields and attributes. Fields may be other nodes, lists,
    or arbitrary values. Fields are passed to the constructor as regular
    positional arguments, attributes as keyword arguments. Each node has
    two attributes: `lineno` (the line number of the node) and `environment`.
    The `environment` attribute is set at the end of the parsing process for
    all nodes automatically.
    """
    fields = ()
    attributes = ('lineno', 'environment')
    abstract = True

    def __init__(self, *fields, **attributes):
        # Positional args map onto ``self.fields`` in declaration order;
        # keyword args may only name entries of ``self.attributes``.
        if self.abstract:
            raise TypeError('abstract nodes are not instanciable')
        if fields:
            if len(fields) != len(self.fields):
                if not self.fields:
                    raise TypeError('%r takes 0 arguments' %
                                    self.__class__.__name__)
                raise TypeError('%r takes 0 or %d argument%s' % (
                    self.__class__.__name__,
                    len(self.fields),
                    len(self.fields) != 1 and 's' or ''
                ))
            for name, arg in izip(self.fields, fields):
                setattr(self, name, arg)
        # Unspecified attributes default to None.
        for attr in self.attributes:
            setattr(self, attr, attributes.pop(attr, None))
        if attributes:
            raise TypeError('unknown attribute %r' %
                            next(iter(attributes)))

    def iter_fields(self, exclude=None, only=None):
        """This method iterates over all fields that are defined and yields
        ``(key, value)`` tuples. Per default all fields are returned, but
        it's possible to limit that to some fields by providing the `only`
        parameter or to exclude some using the `exclude` parameter. Both
        should be sets or tuples of field names.
        """
        for name in self.fields:
            if (exclude is only is None) or \
               (exclude is not None and name not in exclude) or \
               (only is not None and name in only):
                try:
                    yield name, getattr(self, name)
                except AttributeError:
                    # fields that were never assigned are simply skipped
                    pass

    def iter_child_nodes(self, exclude=None, only=None):
        """Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values of they are nodes. If the value
        of a field is a list all the nodes in that list are returned.
        """
        for field, item in self.iter_fields(exclude, only):
            if isinstance(item, list):
                for n in item:
                    if isinstance(n, Node):
                        yield n
            elif isinstance(item, Node):
                yield item

    def find(self, node_type):
        """Find the first node of a given type. If no such node exists the
        return value is `None`.
        """
        for result in self.find_all(node_type):
            return result

    def find_all(self, node_type):
        """Find all the nodes of a given type. If the type is a tuple,
        the check is performed for any of the tuple items.
        """
        # Depth-first traversal of the subtree (self is not tested).
        for child in self.iter_child_nodes():
            if isinstance(child, node_type):
                yield child
            for result in child.find_all(node_type):
                yield result

    def set_ctx(self, ctx):
        """Reset the context of a node and all child nodes. Per default the
        parser will all generate nodes that have a 'load' context as it's the
        most common one. This method is used in the parser to set assignment
        targets and other nodes to a store context.
        """
        # Breadth-first walk; only nodes that declare a 'ctx' field change.
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if 'ctx' in node.fields:
                node.ctx = ctx
            todo.extend(node.iter_child_nodes())
        return self

    def set_lineno(self, lineno, override=False):
        """Set the line numbers of the node and children."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if 'lineno' in node.attributes:
                # keep existing line numbers unless override is requested
                if node.lineno is None or override:
                    node.lineno = lineno
            todo.extend(node.iter_child_nodes())
        return self

    def set_environment(self, environment):
        """Set the environment for all nodes."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            node.environment = environment
            todo.extend(node.iter_child_nodes())
        return self

    def __eq__(self, other):
        # Nodes compare by exact type and field contents.
        return type(self) is type(other) and \
               tuple(self.iter_fields()) == tuple(other.iter_fields())

    def __ne__(self, other):
        return not self.__eq__(other)

    # Restore Python 2 hashing behavior on Python 3
    __hash__ = object.__hash__

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
                      arg in self.fields)
        )
class Stmt(Node):
    """Base node for all statements."""
    abstract = True


class Helper(Node):
    """Nodes that exist in a specific context only."""
    abstract = True


# The statement nodes below only declare their ``fields`` layout; storage,
# traversal, and comparison all come from :class:`Node` / :class:`NodeType`.

class Template(Node):
    """Node that represents a template. This must be the outermost node that
    is passed to the compiler.
    """
    fields = ('body',)


class Output(Stmt):
    """A node that holds multiple expressions which are then printed out.
    This is used both for the `print` statement and the regular template data.
    """
    fields = ('nodes',)


class Extends(Stmt):
    """Represents an extends statement."""
    fields = ('template',)


class For(Stmt):
    """The for loop. `target` is the target for the iteration (usually a
    :class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
    of nodes that are used as loop-body, and `else_` a list of nodes for the
    `else` block. If no else node exists it has to be an empty list.

    For filtered nodes an expression can be stored as `test`, otherwise `None`.
    """
    fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')


class If(Stmt):
    """If `test` is true, `body` is rendered, else `else_`."""
    fields = ('test', 'body', 'else_')


class Macro(Stmt):
    """A macro definition. `name` is the name of the macro, `args` a list of
    arguments and `defaults` a list of defaults if there are any. `body` is
    a list of nodes for the macro body.
    """
    fields = ('name', 'args', 'defaults', 'body')


class CallBlock(Stmt):
    """Like a macro without a name but a call instead. `call` is called with
    the unnamed macro as `caller` argument this node holds.
    """
    fields = ('call', 'args', 'defaults', 'body')


class FilterBlock(Stmt):
    """Node for filter sections."""
    fields = ('body', 'filter')


class Block(Stmt):
    """A node that represents a block."""
    fields = ('name', 'body', 'scoped')


class Include(Stmt):
    """A node that represents the include tag."""
    fields = ('template', 'with_context', 'ignore_missing')


class Import(Stmt):
    """A node that represents the import tag."""
    fields = ('template', 'target', 'with_context')


class FromImport(Stmt):
    """A node that represents the from import tag. It's important to not
    pass unsafe names to the name attribute. The compiler translates the
    attribute lookups directly into getattr calls and does *not* use the
    subscript callback of the interface. As exported variables may not
    start with double underscores (which the parser asserts) this is not a
    problem for regular Jinja code, but if this node is used in an extension
    extra care must be taken.

    The list of names may contain tuples if aliases are wanted.
    """
    fields = ('template', 'names', 'with_context')


class ExprStmt(Stmt):
    """A statement that evaluates an expression and discards the result."""
    fields = ('node',)


class Assign(Stmt):
    """Assigns an expression to a target."""
    fields = ('target', 'node')


class AssignBlock(Stmt):
    """Assigns a block to a target."""
    fields = ('target', 'body')
class Expr(Node):
    """Baseclass for all expressions."""
    abstract = True

    def as_const(self, eval_ctx=None):
        """Return the value of the expression as constant or raise
        :exc:`Impossible` if this was not possible.

        An :class:`EvalContext` can be provided, if none is given
        a default context is created which requires the nodes to have
        an attached environment.

        .. versionchanged:: 2.4
           the `eval_ctx` parameter was added.
        """
        raise Impossible()

    def can_assign(self):
        """Check if it's possible to assign something to this node."""
        return False


class BinExpr(Expr):
    """Baseclass for all binary expressions."""
    fields = ('left', 'right')
    operator = None
    abstract = True

    def as_const(self, eval_ctx=None):
        # Fold ``left <op> right`` at compile time when both operands fold.
        eval_ctx = get_eval_context(self, eval_ctx)
        # intercepted operators cannot be folded at compile time
        if self.environment.sandboxed and \
           self.operator in self.environment.intercepted_binops:
            raise Impossible()
        f = _binop_to_func[self.operator]
        try:
            return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
        except Exception:
            # any evaluation failure simply defers the work to runtime
            raise Impossible()


class UnaryExpr(Expr):
    """Baseclass for all unary expressions."""
    fields = ('node',)
    operator = None
    abstract = True

    def as_const(self, eval_ctx=None):
        # Same folding strategy as BinExpr, for a single operand.
        eval_ctx = get_eval_context(self, eval_ctx)
        # intercepted operators cannot be folded at compile time
        if self.environment.sandboxed and \
           self.operator in self.environment.intercepted_unops:
            raise Impossible()
        f = _uaop_to_func[self.operator]
        try:
            return f(self.node.as_const(eval_ctx))
        except Exception:
            raise Impossible()
class Name(Expr):
    """Looks up a name or stores a value in a name.
    The `ctx` of the node can be one of the following values:

    - `store`: store a value in the name
    - `load`: load that name
    - `param`: like `store` but if the name was defined as function parameter.
    """
    fields = ('name', 'ctx')

    def can_assign(self):
        # The boolean/none literals (in either spelling) can never be
        # assignment targets.
        forbidden = ('true', 'false', 'none', 'True', 'False', 'None')
        return self.name not in forbidden
class Literal(Expr):
    """Baseclass for literals."""
    abstract = True


class Const(Literal):
    """All constant values. The parser will return this node for simple
    constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true).
    """
    fields = ('value',)

    def as_const(self, eval_ctx=None):
        return self.value

    @classmethod
    def from_untrusted(cls, value, lineno=None, environment=None):
        """Return a const object if the value is representable as
        constant value in the generated code, otherwise it will raise
        an `Impossible` exception.
        """
        # local import — presumably to avoid a nodes<->compiler import
        # cycle; confirm before moving to module level
        from .compiler import has_safe_repr
        if not has_safe_repr(value):
            raise Impossible()
        return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
    """A constant template string."""
    fields = ('data',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        if eval_ctx.autoescape:
            # autoescaped output wraps the raw data in Markup
            return Markup(self.data)
        return self.data
class Tuple(Literal):
    """For loop unpacking and some other things like multiple arguments
    for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
    is used for loading the names or storing.
    """
    fields = ('items', 'ctx')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return tuple(x.as_const(eval_ctx) for x in self.items)

    def can_assign(self):
        # assignable only if every element is itself assignable
        for item in self.items:
            if not item.can_assign():
                return False
        return True


class List(Literal):
    """Any list literal such as ``[1, 2, 3]``"""
    fields = ('items',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
    """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
    :class:`Pair` nodes.
    """
    fields = ('items',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return dict(x.as_const(eval_ctx) for x in self.items)


class Pair(Helper):
    """A key, value pair for dicts."""
    fields = ('key', 'value')

    def as_const(self, eval_ctx=None):
        # both key and value are expression nodes
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)


class Keyword(Helper):
    """A key, value pair for keyword arguments where key is a string."""
    fields = ('key', 'value')

    def as_const(self, eval_ctx=None):
        # the key is already a plain string, only the value is evaluated
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
    """A conditional expression (inline if expression). (``{{
    foo if bar else baz }}``)
    """
    fields = ('test', 'expr1', 'expr2')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if self.test.as_const(eval_ctx):
            return self.expr1.as_const(eval_ctx)

        # if we evaluate to an undefined object, we better do that at runtime
        if self.expr2 is None:
            raise Impossible()

        return self.expr2.as_const(eval_ctx)
class Filter(Expr):
    """This node applies a filter on an expression. `name` is the name of
    the filter, the rest of the fields are the same as for :class:`Call`.

    If the `node` of a filter is `None` the contents of the last buffer are
    filtered. Buffers are created by macros and filter blocks.
    """
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # buffer filters (node is None) can only be evaluated at runtime
        if eval_ctx.volatile or self.node is None:
            raise Impossible()
        # we have to be careful here because we call filter_ below.
        # if this variable would be called filter, 2to3 would wrap the
        # call in a list beause it is assuming we are talking about the
        # builtin filter function here which no longer returns a list in
        # python 3. because of that, do not rename filter_ to filter!
        filter_ = self.environment.filters.get(self.name)
        if filter_ is None or getattr(filter_, 'contextfilter', False):
            raise Impossible()
        obj = self.node.as_const(eval_ctx)
        args = [x.as_const(eval_ctx) for x in self.args]
        # eval-context / environment filters receive their extra first arg
        if getattr(filter_, 'evalcontextfilter', False):
            args.insert(0, eval_ctx)
        elif getattr(filter_, 'environmentfilter', False):
            args.insert(0, self.environment)
        kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return filter_(obj, *args, **kwargs)
        except Exception:
            raise Impossible()
class Test(Expr):
    """Applies a test on an expression. `name` is the name of the test, the
    rest of the fields are the same as for :class:`Call`.
    """
    # no ``as_const`` override: tests inherit Expr's behavior and are
    # therefore never folded at compile time
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
    """Calls an expression. `args` is a list of arguments, `kwargs` a list
    of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
    and `dyn_kwargs` has to be either `None` or a node that is used as
    node for dynamic positional (``*args``) or keyword (``**kwargs``)
    arguments.
    """
    fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        obj = self.node.as_const(eval_ctx)

        # don't evaluate context functions
        args = [x.as_const(eval_ctx) for x in self.args]
        if isinstance(obj, _context_function_types):
            if getattr(obj, 'contextfunction', False):
                raise Impossible()
            elif getattr(obj, 'evalcontextfunction', False):
                args.insert(0, eval_ctx)
            elif getattr(obj, 'environmentfunction', False):
                args.insert(0, self.environment)

        kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return obj(*args, **kwargs)
        except Exception:
            # the call is retried at runtime instead
            raise Impossible()
class Getitem(Expr):
    """Get an attribute or item from an expression and prefer the item."""
    fields = ('node', 'arg', 'ctx')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # only load-context lookups can be folded
        if self.ctx != 'load':
            raise Impossible()
        try:
            return self.environment.getitem(self.node.as_const(eval_ctx),
                                            self.arg.as_const(eval_ctx))
        except Exception:
            raise Impossible()

    def can_assign(self):
        # item lookups are never valid assignment targets
        return False


class Getattr(Expr):
    """Get an attribute or item from an expression that is a ascii-only
    bytestring and prefer the attribute.
    """
    fields = ('node', 'attr', 'ctx')

    def as_const(self, eval_ctx=None):
        if self.ctx != 'load':
            raise Impossible()
        try:
            eval_ctx = get_eval_context(self, eval_ctx)
            return self.environment.getattr(self.node.as_const(eval_ctx),
                                            self.attr)
        except Exception:
            raise Impossible()

    def can_assign(self):
        # attribute lookups are never valid assignment targets
        return False
class Slice(Expr):
    """Represents a slice object. This must only be used as argument for
    :class:`Subscript`.
    """
    fields = ('start', 'stop', 'step')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)

        def const(obj):
            # missing slice parts (e.g. ``x[:stop]``) stay None
            if obj is None:
                return None
            return obj.as_const(eval_ctx)

        return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
    """Concatenates the list of expressions provided after converting them to
    unicode.
    """
    fields = ('nodes',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
    """Compares an expression with some other expressions.  `ops` must be a
    list of :class:`Operand`\\s.
    """
    fields = ('expr', 'ops')

    def as_const(self, eval_ctx=None):
        """Constant-fold the (possibly chained) comparison.

        Bug fix: the previous implementation kept only the *last*
        comparison's result, so ``1 > 2 < 3`` folded to ``True`` even
        though Python chaining semantics (used by the compiled template
        at runtime) yield ``False``.  Folding now short-circuits on the
        first falsy comparison, matching runtime behaviour.
        """
        eval_ctx = get_eval_context(self, eval_ctx)
        result = value = self.expr.as_const(eval_ctx)
        try:
            for op in self.ops:
                new_value = op.expr.as_const(eval_ctx)
                result = _cmpop_to_func[op.op](value, new_value)
                # chained comparisons short-circuit on the first failure
                if not result:
                    return False
                value = new_value
        except Exception:
            # any failure during folding means there is no constant value
            raise Impossible()
        return result
class Operand(Helper):
    """Holds an operator and an expression."""
    fields = ('op', 'expr')

if __debug__:
    # Append the full operator table to the docstring at import time.
    # Only done in non-optimized mode because `-O` strips docstrings,
    # which would make the `+=` below fail on None.
    Operand.__doc__ += '\nThe following operators are available: ' + \
        ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
                  set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
    """Multiplies the left with the right node."""
    operator = '*'


class Div(BinExpr):
    """Divides the left by the right node."""
    operator = '/'


class FloorDiv(BinExpr):
    """Divides the left by the right node and truncates the result to an
    integer (floor division).
    """
    operator = '//'


class Add(BinExpr):
    """Add the left to the right node."""
    operator = '+'


class Sub(BinExpr):
    """Subtract the right from the left node."""
    operator = '-'


class Mod(BinExpr):
    """Left modulo right."""
    operator = '%'


class Pow(BinExpr):
    """Left to the power of right."""
    operator = '**'


class And(BinExpr):
    """Short circuited AND."""
    operator = 'and'

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # Native `and` keeps short-circuiting and returns the operand value
        # itself, matching runtime template semantics.
        return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)


class Or(BinExpr):
    """Short circuited OR."""
    operator = 'or'

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # Native `or` keeps short-circuiting and returns the operand value
        # itself, matching runtime template semantics.
        return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)


class Not(UnaryExpr):
    """Negate the expression."""
    operator = 'not'


class Neg(UnaryExpr):
    """Make the expression negative."""
    operator = '-'


class Pos(UnaryExpr):
    """Make the expression positive (noop for most expressions)"""
    operator = '+'
# Helpers for extensions


class EnvironmentAttribute(Expr):
    """Loads an attribute from the environment object.  This is useful for
    extensions that want to call a callback stored on the environment.
    """
    fields = ('name',)


class ExtensionAttribute(Expr):
    """Returns the attribute of an extension bound to the environment.
    The identifier is the identifier of the :class:`Extension`.

    This node is usually constructed by calling the
    :meth:`~jinja2.ext.Extension.attr` method on an extension.
    """
    fields = ('identifier', 'name')


class ImportedName(Expr):
    """If created with an import name the import name is returned on node
    access.  For example ``ImportedName('cgi.escape')`` returns the `escape`
    function from the cgi module on evaluation.  Imports are optimized by the
    compiler so there is no need to assign them to local variables.
    """
    fields = ('importname',)


class InternalName(Expr):
    """An internal name in the compiler.  You cannot create these nodes
    yourself but the parser provides a
    :meth:`~jinja2.parser.Parser.free_identifier` method that creates
    a new identifier for you.  This identifier is not available from the
    template and is not treated specially by the compiler.
    """
    fields = ('name',)

    def __init__(self):
        # Direct instantiation is forbidden on purpose; these nodes are
        # minted only via Parser.free_identifier.
        raise TypeError('Can\'t create internal names. Use the '
                        '`free_identifier` method on a parser.')
class MarkSafe(Expr):
    """Mark the wrapped expression as safe (wrap it as `Markup`)."""
    fields = ('expr',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return Markup(self.expr.as_const(eval_ctx))


class MarkSafeIfAutoescape(Expr):
    """Mark the wrapped expression as safe (wrap it as `Markup`) but
    only if autoescaping is active.

    .. versionadded:: 2.5
    """
    fields = ('expr',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # In a volatile context the autoescape state is unknown at compile
        # time, so constant folding must be refused.
        if eval_ctx.volatile:
            raise Impossible()
        expr = self.expr.as_const(eval_ctx)
        if eval_ctx.autoescape:
            return Markup(expr)
        return expr
class ContextReference(Expr):
    """Returns the current template context.  It can be used like a
    :class:`Name` node, with a ``'load'`` ctx and will return the
    current :class:`~jinja2.runtime.Context` object.

    Here an example that assigns the current template name to a
    variable named `foo`::

        Assign(Name('foo', ctx='store'),
               Getattr(ContextReference(), 'name'))
    """


class Continue(Stmt):
    """Continue a loop."""


class Break(Stmt):
    """Break a loop."""


class Scope(Stmt):
    """An artificial scope."""
    fields = ('body',)


class EvalContextModifier(Stmt):
    """Modifies the eval context.  For each option that should be modified,
    a :class:`Keyword` has to be added to the :attr:`options` list.

    Example to change the `autoescape` setting::

        EvalContextModifier(options=[Keyword('autoescape', Const(True))])
    """
    fields = ('options',)


class ScopedEvalContextModifier(EvalContextModifier):
    """Modifies the eval context and reverts it later.  Works exactly like
    :class:`EvalContextModifier` but will only modify the
    :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
    """
    fields = ('body',)
# Lock down the node hierarchy: replacing NodeType.__new__ ensures nobody
# outside this module can define additional node classes.
def _failing_new(*args, **kwargs):
    raise TypeError("can't create custom node types")


NodeType.__new__ = staticmethod(_failing_new)
del _failing_new
| ./CrossVul/dataset_final_sorted/CWE-134/py/bad_4885_0 |
crossvul-python_data_good_4885_1 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import types
import operator
from collections import Mapping
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, text_type, PY2
from jinja2.utils import Markup
# Detect whether this Python's string type has a `format` method; only then
# are the sandboxed formatters at the bottom of the module needed (and only
# then are their dependencies importable).
has_format = False
if hasattr(text_type, 'format'):
    from markupsafe import EscapeFormatter
    from string import Formatter
    has_format = True

#: maximum number of items a range may produce
MAX_RANGE = 100000

#: attributes of function objects that are considered unsafe.
if PY2:
    UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
                                      'func_defaults', 'func_globals'])
else:
    # On versions > python 2 the special attributes on functions are gone,
    # but they remain on methods and generators for whatever reason.
    UNSAFE_FUNCTION_ATTRIBUTES = set()

#: unsafe method attributes.  function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])

#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])

#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code'])

#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame'])
import warnings

# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
                        module='jinja2.sandbox')

from collections import deque

# Base tuples of the builtin mutable container types; extended below with
# whatever legacy "user" container types are importable on this interpreter.
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)

# on python 2.x we can register the user collection types
try:
    from UserDict import UserDict, DictMixin
    from UserList import UserList
    _mutable_mapping_types += (UserDict, DictMixin)
    # NOTE(review): UserList is a sequence but is registered with the *set*
    # types here — looks like it belongs in _mutable_sequence_types; verify.
    _mutable_set_types += (UserList,)
except ImportError:
    pass

# if sets is still available, register the mutable set from there as well
try:
    from sets import Set
    _mutable_set_types += (Set,)
except ImportError:
    pass

#: register Python 2.6 abstract base classes
try:
    from collections import MutableSet, MutableMapping, MutableSequence
    _mutable_set_types += (MutableSet,)
    _mutable_mapping_types += (MutableMapping,)
    _mutable_sequence_types += (MutableSequence,)
except ImportError:
    pass

# Maps each family of mutable container types to the method names that
# mutate an instance in place; consumed by `modifies_known_mutable`.
_mutable_spec = (
    (_mutable_set_types, frozenset([
        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
        'symmetric_difference_update', 'update'
    ])),
    (_mutable_mapping_types, frozenset([
        'clear', 'pop', 'popitem', 'setdefault', 'update'
    ])),
    (_mutable_sequence_types, frozenset([
        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
    ])),
    (deque, frozenset([
        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
        'popleft', 'remove', 'rotate'
    ]))
)
class _MagicFormatMapping(Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
See http://bugs.python.org/issue13598 for information about why
this is necessary.
"""
def __init__(self, args, kwargs):
self._args = args
self._kwargs = kwargs
self._last_index = 0
def __getitem__(self, key):
if key == '':
idx = self._last_index
self._last_index += 1
try:
return self._args[idx]
except LookupError:
pass
key = str(idx)
return self._kwargs[key]
def __iter__(self):
return iter(self._kwargs)
def __len__(self):
return len(self._kwargs)
def inspect_format_method(callable):
    """Return the string a bound ``str.format`` method belongs to, or
    ``None`` when the callable is anything else.

    Used by :meth:`SandboxedEnvironment.call` to detect ``"...".format``
    lookups so they can be routed through the sandboxed formatter.
    """
    if not has_format:
        return None
    is_method = isinstance(callable, (types.MethodType,
                                      types.BuiltinMethodType))
    if not is_method or callable.__name__ != 'format':
        return None
    receiver = callable.__self__
    if isinstance(receiver, string_types):
        return receiver
def safe_range(*args):
    """A ``range`` replacement that refuses to produce more than
    ``MAX_RANGE`` items, so templates cannot exhaust memory.
    """
    result = range(*args)
    if len(result) <= MAX_RANGE:
        return result
    raise OverflowError(
        'range too big, maximum size for range is %d' % MAX_RANGE)
def unsafe(f):
    """Decorator that marks a function or method as unsafe so the sandbox
    refuses to call it::

        @unsafe
        def delete(self):
            pass
    """
    setattr(f, 'unsafe_callable', True)
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    if isinstance(obj, types.FunctionType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, types.MethodType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
           attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        if attr == 'mro':
            return True
    elif isinstance(obj, (types.CodeType, types.TracebackType,
                          types.FrameType)):
        return True
    elif isinstance(obj, types.GeneratorType):
        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
            return True
    elif hasattr(types, 'CoroutineType') and \
            isinstance(obj, types.CoroutineType):
        if attr in UNSAFE_COROUTINE_ATTRIBUTES:
            return True
    elif hasattr(types, 'AsyncGeneratorType') and \
            isinstance(obj, types.AsyncGeneratorType):
        # BUG FIX: this previously read `attri`, an undefined name, which
        # raised NameError whenever an async generator was inspected.
        if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
            return True
    # everything dunder-prefixed is considered internal as well
    return attr.startswith('__')
def modifies_known_mutable(obj, attr):
    """Check whether calling ``attr`` on the builtin mutable container
    ``obj`` would modify it in place.  Supports list, dict, set and deque
    as well as their "user" equivalents (`sets.Set`, `UserDict.*` etc.)
    and, from Python 2.6 onwards, the abstract base classes `MutableSet`,
    `MutableMapping`, and `MutableSequence`.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    Unsupported objects (such as unicode strings) always yield ``False``:

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    for candidates, mutators in _mutable_spec:
        if isinstance(obj, candidates):
            return attr in mutators
    return False
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """
    #: checked by the compiler so that it emits sandbox-aware lookup/call
    #: code for templates compiled with this environment
    sandboxed = True

    #: default callback table for the binary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
        '//': operator.floordiv,
        '**': operator.pow,
        '%': operator.mod
    }

    #: default callback table for the unary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table = {
        '+': operator.pos,
        '-': operator.neg
    }

    #: a set of binary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops = frozenset()

    #: a set of unary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops = frozenset()

    def intercept_unop(self, operator):
        """Called during template compilation with the name of a unary
        operator to check if it should be intercepted at runtime.  If this
        method returns `True`, :meth:`call_unop` is executed for this unary
        operator.  The default implementation of :meth:`call_unop` will use
        the :attr:`unop_table` dictionary to perform the operator with the
        same logic as the builtin one.

        The following unary operators are interceptable: ``+`` and ``-``

        Intercepted calls are always slower than the native operator call,
        so make sure only to intercept the ones you are interested in.

        .. versionadded:: 2.6
        """
        return False

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # expose the bounded range so templates cannot exhaust memory
        self.globals['range'] = safe_range
        # per-instance copies so subclasses/instances can customize them
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (getattr(obj, 'unsafe_callable', False) or
                    getattr(obj, 'alters_data', False))

    def call_binop(self, context, operator, left, right):
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)

    def call_unop(self, context, operator, arg):
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)

    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # item lookup failed; fall back to attribute lookup, but only
            # for string keys and only when the attribute is safe
            if isinstance(argument, string_types):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # attribute lookup failed; fall back to item lookup
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)

    def format_string(self, s, args, kwargs):
        """If a format call is detected, then this is routed through this
        method so that our safety sandbox can be used for it.  Fields in
        the format string resolve through the sandboxed formatters below,
        so ``{0.__class__}``-style lookups hit the sandbox's attribute
        checks instead of the raw builtin lookup.
        """
        if isinstance(s, Markup):
            formatter = SandboxedEscapeFormatter(self, s.escape)
        else:
            formatter = SandboxedFormatter(self)
        kwargs = _MagicFormatMapping(args, kwargs)
        rv = formatter.vformat(s, args, kwargs)
        # preserve the input type (str vs Markup)
        return type(s)(rv)

    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        # bound str.format is special-cased: route it through the
        # sandboxed formatter instead of calling it directly
        fmt = inspect_format_method(__obj)
        if fmt is not None:
            return __self.format_string(fmt, args, kwargs)
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """A :class:`SandboxedEnvironment` that additionally rejects attribute
    access that would modify the builtin mutable containers `list`, `set`,
    and `dict`, as decided by :func:`modifies_known_mutable`.
    """

    def is_safe_attribute(self, obj, attr, value):
        if SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
            return not modifies_known_mutable(obj, attr)
        return False
if has_format:
    # This really is not a public API apparently.
    try:
        from _string import formatter_field_name_split
    except ImportError:
        def formatter_field_name_split(field_name):
            return field_name._formatter_field_name_split()

    class SandboxedFormatterMixin(object):
        """Resolves ``{a.b}`` / ``{a[b]}`` format fields through the
        sandboxed environment's getattr/getitem instead of the builtin
        lookup, so format strings cannot reach unsafe attributes.
        """
        def __init__(self, env):
            self._env = env

        def get_field(self, field_name, args, kwargs):
            first, rest = formatter_field_name_split(field_name)
            obj = self.get_value(first, args, kwargs)
            # route every dotted/indexed step through the sandbox
            for is_attr, i in rest:
                if is_attr:
                    obj = self._env.getattr(obj, i)
                else:
                    obj = self._env.getitem(obj, i)
            return obj, first

    class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
        """Sandboxed formatter for plain (unescaped) strings."""
        def __init__(self, env):
            SandboxedFormatterMixin.__init__(self, env)
            Formatter.__init__(self)

    class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
        """Sandboxed formatter for ``Markup`` strings (keeps escaping)."""
        def __init__(self, env, escape):
            SandboxedFormatterMixin.__init__(self, env)
            EscapeFormatter.__init__(self, escape)
| ./CrossVul/dataset_final_sorted/CWE-134/py/good_4885_1 |
crossvul-python_data_bad_4885_1 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import types
import operator
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, PY2
#: maximum number of items a range may produce
MAX_RANGE = 100000

#: attributes of function objects that are considered unsafe.
if PY2:
    UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
                                      'func_defaults', 'func_globals'])
else:
    # On versions > python 2 the special attributes on functions are gone,
    # but they remain on methods and generators for whatever reason.
    UNSAFE_FUNCTION_ATTRIBUTES = set()

#: unsafe method attributes.  function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])

#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])

import warnings

# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
                        module='jinja2.sandbox')

from collections import deque

# Base tuples of the builtin mutable container types; extended below with
# whatever legacy "user" container types are importable on this interpreter.
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)

# on python 2.x we can register the user collection types
try:
    from UserDict import UserDict, DictMixin
    from UserList import UserList
    _mutable_mapping_types += (UserDict, DictMixin)
    # NOTE(review): UserList is a sequence but is registered with the *set*
    # types here — looks like it belongs in _mutable_sequence_types; verify.
    _mutable_set_types += (UserList,)
except ImportError:
    pass

# if sets is still available, register the mutable set from there as well
try:
    from sets import Set
    _mutable_set_types += (Set,)
except ImportError:
    pass

#: register Python 2.6 abstract base classes
try:
    from collections import MutableSet, MutableMapping, MutableSequence
    _mutable_set_types += (MutableSet,)
    _mutable_mapping_types += (MutableMapping,)
    _mutable_sequence_types += (MutableSequence,)
except ImportError:
    pass

# Maps each family of mutable container types to the method names that
# mutate an instance in place; consumed by `modifies_known_mutable`.
_mutable_spec = (
    (_mutable_set_types, frozenset([
        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
        'symmetric_difference_update', 'update'
    ])),
    (_mutable_mapping_types, frozenset([
        'clear', 'pop', 'popitem', 'setdefault', 'update'
    ])),
    (_mutable_sequence_types, frozenset([
        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
    ])),
    (deque, frozenset([
        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
        'popleft', 'remove', 'rotate'
    ]))
)
def safe_range(*args):
    """A ``range`` replacement that refuses to produce more than
    ``MAX_RANGE`` items, so templates cannot exhaust memory.
    """
    result = range(*args)
    if len(result) <= MAX_RANGE:
        return result
    raise OverflowError(
        'range too big, maximum size for range is %d' % MAX_RANGE)
def unsafe(f):
    """Decorator that marks a function or method as unsafe so the sandbox
    refuses to call it::

        @unsafe
        def delete(self):
            pass
    """
    setattr(f, 'unsafe_callable', True)
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # dunder-prefixed names are always internal, regardless of object kind
    dunder = attr.startswith('__')
    if isinstance(obj, types.FunctionType):
        return dunder or attr in UNSAFE_FUNCTION_ATTRIBUTES
    if isinstance(obj, types.MethodType):
        return dunder or attr in UNSAFE_FUNCTION_ATTRIBUTES \
            or attr in UNSAFE_METHOD_ATTRIBUTES
    if isinstance(obj, type):
        return dunder or attr == 'mro'
    if isinstance(obj, (types.CodeType, types.TracebackType,
                        types.FrameType)):
        # every attribute of code/traceback/frame objects is off limits
        return True
    if isinstance(obj, types.GeneratorType):
        return dunder or attr in UNSAFE_GENERATOR_ATTRIBUTES
    return dunder
def modifies_known_mutable(obj, attr):
    """Check whether calling ``attr`` on the builtin mutable container
    ``obj`` would modify it in place.  Supports list, dict, set and deque
    as well as their "user" equivalents (`sets.Set`, `UserDict.*` etc.)
    and, from Python 2.6 onwards, the abstract base classes `MutableSet`,
    `MutableMapping`, and `MutableSequence`.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    Unsupported objects (such as unicode strings) always yield ``False``:

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    for candidates, mutators in _mutable_spec:
        if isinstance(obj, candidates):
            return attr in mutators
    return False
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """
    #: checked by the compiler so that it emits sandbox-aware lookup/call
    #: code for templates compiled with this environment
    sandboxed = True

    #: default callback table for the binary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
        '//': operator.floordiv,
        '**': operator.pow,
        '%': operator.mod
    }

    #: default callback table for the unary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table = {
        '+': operator.pos,
        '-': operator.neg
    }

    #: a set of binary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops = frozenset()

    #: a set of unary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops = frozenset()

    def intercept_unop(self, operator):
        """Called during template compilation with the name of a unary
        operator to check if it should be intercepted at runtime.  If this
        method returns `True`, :meth:`call_unop` is executed for this unary
        operator.  The default implementation of :meth:`call_unop` will use
        the :attr:`unop_table` dictionary to perform the operator with the
        same logic as the builtin one.

        The following unary operators are interceptable: ``+`` and ``-``

        Intercepted calls are always slower than the native operator call,
        so make sure only to intercept the ones you are interested in.

        .. versionadded:: 2.6
        """
        return False

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # expose the bounded range so templates cannot exhaust memory
        self.globals['range'] = safe_range
        # per-instance copies so subclasses/instances can customize them
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (getattr(obj, 'unsafe_callable', False) or
                    getattr(obj, 'alters_data', False))

    def call_binop(self, context, operator, left, right):
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)

    def call_unop(self, context, operator, arg):
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)

    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # item lookup failed; fall back to attribute lookup, but only
            # for string keys and only when the attribute is safe
            if isinstance(argument, string_types):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # attribute lookup failed; fall back to item lookup
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)

    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        # NOTE(review): unlike the patched variant of this module, bound
        # ``str.format`` is NOT intercepted here, so a template calling
        # ``"{0.__class__}".format(x)`` resolves format fields with the
        # raw builtin lookup instead of the sandboxed getattr — presumably
        # the CWE-134 sandbox escape this sample documents; confirm.
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """A :class:`SandboxedEnvironment` that additionally rejects attribute
    access that would modify the builtin mutable containers `list`, `set`,
    and `dict`, as decided by :func:`modifies_known_mutable`.
    """

    def is_safe_attribute(self, obj, attr, value):
        if SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
            return not modifies_known_mutable(obj, attr)
        return False
| ./CrossVul/dataset_final_sorted/CWE-134/py/bad_4885_1 |
crossvul-python_data_good_4885_0 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import types
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import izip, with_metaclass, text_type
#: the types we support for context functions
_context_function_types = (types.FunctionType, types.MethodType)

#: binary operator symbols -> implementing callables; used for constant
#: folding of binary expression nodes
_binop_to_func = {
    '*': operator.mul,
    '/': operator.truediv,
    '//': operator.floordiv,
    '**': operator.pow,
    '%': operator.mod,
    '+': operator.add,
    '-': operator.sub
}

#: unary operator symbols -> implementing callables (constant folding)
_uaop_to_func = {
    'not': operator.not_,
    '+': operator.pos,
    '-': operator.neg
}

#: comparison operator names -> implementing callables; consumed by
#: Compare.as_const
_cmpop_to_func = {
    'eq': operator.eq,
    'ne': operator.ne,
    'gt': operator.gt,
    'gteq': operator.ge,
    'lt': operator.lt,
    'lteq': operator.le,
    'in': lambda a, b: a in b,
    'notin': lambda a, b: a not in b
}
class Impossible(Exception):
    """Signals that a node cannot honour a requested action (typically
    compile-time constant folding of a non-constant expression)."""
class NodeType(type):
    """Metaclass for nodes: merges the ``fields`` and ``attributes``
    declarations of the (single allowed) base class into the new class,
    so child nodes automatically inherit their parent's layout."""

    def __new__(cls, name, bases, d):
        assert len(bases) == 1, 'multiple inheritance not allowed'
        for attr in 'fields', 'attributes':
            inherited = list(getattr(bases[0], attr, ()))
            combined = inherited + list(d.get(attr, ()))
            # A name may appear at most once across the hierarchy.
            assert len(combined) == len(set(combined)), 'layout conflict'
            d[attr] = tuple(combined)
        d.setdefault('abstract', False)
        return type.__new__(cls, name, bases, d)
class EvalContext(object):
    """Holds evaluation time information.  Custom attributes can be attached
    to it in extensions.
    """

    def __init__(self, environment, template_name=None):
        self.environment = environment
        autoescape = environment.autoescape
        # A callable autoescape setting is resolved per template name.
        self.autoescape = (autoescape(template_name)
                           if callable(autoescape) else autoescape)
        self.volatile = False

    def save(self):
        """Snapshot the current state as a plain dict."""
        return dict(self.__dict__)

    def revert(self, old):
        """Restore a state previously produced by :meth:`save`."""
        self.__dict__.clear()
        self.__dict__.update(old)
def get_eval_context(node, ctx):
    """Return *ctx* unchanged, or build a fresh :class:`EvalContext` from
    the node's attached environment when no context was supplied."""
    if ctx is not None:
        return ctx
    if node.environment is None:
        raise RuntimeError('if no eval context is passed, the '
                           'node must have an attached '
                           'environment.')
    return EvalContext(node.environment)
class Node(with_metaclass(NodeType, object)):
    """Baseclass for all Jinja2 nodes.  There are a number of nodes available
    of different types.  There are four major types:

    -   :class:`Stmt`: statements
    -   :class:`Expr`: expressions
    -   :class:`Helper`: helper nodes
    -   :class:`Template`: the outermost wrapper node

    All nodes have fields and attributes.  Fields may be other nodes, lists,
    or arbitrary values.  Fields are passed to the constructor as regular
    positional arguments, attributes as keyword arguments.  Each node has
    two attributes: `lineno` (the line number of the node) and `environment`.
    The `environment` attribute is set at the end of the parsing process for
    all nodes automatically.
    """
    fields = ()
    attributes = ('lineno', 'environment')
    abstract = True

    def __init__(self, *fields, **attributes):
        # Positional args fill the declared ``fields``; keyword args may
        # only be the declared ``attributes`` (missing ones default None).
        if self.abstract:
            raise TypeError('abstract nodes are not instanciable')
        if fields:
            if len(fields) != len(self.fields):
                if not self.fields:
                    raise TypeError('%r takes 0 arguments' %
                                    self.__class__.__name__)
                raise TypeError('%r takes 0 or %d argument%s' % (
                    self.__class__.__name__,
                    len(self.fields),
                    len(self.fields) != 1 and 's' or ''
                ))
            for name, arg in izip(self.fields, fields):
                setattr(self, name, arg)
        for attr in self.attributes:
            setattr(self, attr, attributes.pop(attr, None))
        if attributes:
            raise TypeError('unknown attribute %r' %
                            next(iter(attributes)))

    def iter_fields(self, exclude=None, only=None):
        """This method iterates over all fields that are defined and yields
        ``(key, value)`` tuples.  Per default all fields are returned, but
        it's possible to limit that to some fields by providing the `only`
        parameter or to exclude some using the `exclude` parameter.  Both
        should be sets or tuples of field names.
        """
        for name in self.fields:
            if (exclude is only is None) or \
               (exclude is not None and name not in exclude) or \
               (only is not None and name in only):
                try:
                    yield name, getattr(self, name)
                except AttributeError:
                    # Unset fields are silently skipped.
                    pass

    def iter_child_nodes(self, exclude=None, only=None):
        """Iterates over all direct child nodes of the node.  This iterates
        over all fields and yields the values of they are nodes.  If the value
        of a field is a list all the nodes in that list are returned.
        """
        for field, item in self.iter_fields(exclude, only):
            if isinstance(item, list):
                for n in item:
                    if isinstance(n, Node):
                        yield n
            elif isinstance(item, Node):
                yield item

    def find(self, node_type):
        """Find the first node of a given type.  If no such node exists the
        return value is `None`.
        """
        for result in self.find_all(node_type):
            return result

    def find_all(self, node_type):
        """Find all the nodes of a given type.  If the type is a tuple,
        the check is performed for any of the tuple items.
        """
        # Depth-first, pre-order traversal of the subtree (self excluded).
        for child in self.iter_child_nodes():
            if isinstance(child, node_type):
                yield child
            for result in child.find_all(node_type):
                yield result

    def set_ctx(self, ctx):
        """Reset the context of a node and all child nodes.  Per default the
        parser will all generate nodes that have a 'load' context as it's the
        most common one.  This method is used in the parser to set assignment
        targets and other nodes to a store context.
        """
        # Breadth-first walk; only nodes declaring a 'ctx' field change.
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if 'ctx' in node.fields:
                node.ctx = ctx
            todo.extend(node.iter_child_nodes())
        return self

    def set_lineno(self, lineno, override=False):
        """Set the line numbers of the node and children."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if 'lineno' in node.attributes:
                # Without *override* only fill in missing line numbers.
                if node.lineno is None or override:
                    node.lineno = lineno
            todo.extend(node.iter_child_nodes())
        return self

    def set_environment(self, environment):
        """Set the environment for all nodes."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            node.environment = environment
            todo.extend(node.iter_child_nodes())
        return self

    def __eq__(self, other):
        # Nodes compare by exact type and field contents, not identity.
        return type(self) is type(other) and \
               tuple(self.iter_fields()) == tuple(other.iter_fields())

    def __ne__(self, other):
        return not self.__eq__(other)

    # Restore Python 2 hashing behavior on Python 3
    __hash__ = object.__hash__

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
                      arg in self.fields)
        )
class Stmt(Node):
    """Base node for all statements."""
    abstract = True


class Helper(Node):
    """Nodes that exist in a specific context only."""
    abstract = True
# Statement nodes: pure declarative layouts -- each class only names its
# fields; behavior lives in the compiler that visits them.

class Template(Node):
    """Node that represents a template.  This must be the outermost node that
    is passed to the compiler.
    """
    fields = ('body',)


class Output(Stmt):
    """A node that holds multiple expressions which are then printed out.
    This is used both for the `print` statement and the regular template data.
    """
    fields = ('nodes',)


class Extends(Stmt):
    """Represents an extends statement."""
    fields = ('template',)


class For(Stmt):
    """The for loop.  `target` is the target for the iteration (usually a
    :class:`Name` or :class:`Tuple`), `iter` the iterable.  `body` is a list
    of nodes that are used as loop-body, and `else_` a list of nodes for the
    `else` block.  If no else node exists it has to be an empty list.

    For filtered nodes an expression can be stored as `test`, otherwise `None`.
    """
    fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')


class If(Stmt):
    """If `test` is true, `body` is rendered, else `else_`."""
    fields = ('test', 'body', 'else_')


class Macro(Stmt):
    """A macro definition.  `name` is the name of the macro, `args` a list of
    arguments and `defaults` a list of defaults if there are any.  `body` is
    a list of nodes for the macro body.
    """
    fields = ('name', 'args', 'defaults', 'body')


class CallBlock(Stmt):
    """Like a macro without a name but a call instead.  `call` is called with
    the unnamed macro as `caller` argument this node holds.
    """
    fields = ('call', 'args', 'defaults', 'body')


class FilterBlock(Stmt):
    """Node for filter sections."""
    fields = ('body', 'filter')


class Block(Stmt):
    """A node that represents a block."""
    fields = ('name', 'body', 'scoped')


class Include(Stmt):
    """A node that represents the include tag."""
    fields = ('template', 'with_context', 'ignore_missing')


class Import(Stmt):
    """A node that represents the import tag."""
    fields = ('template', 'target', 'with_context')


class FromImport(Stmt):
    """A node that represents the from import tag.  It's important to not
    pass unsafe names to the name attribute.  The compiler translates the
    attribute lookups directly into getattr calls and does *not* use the
    subscript callback of the interface.  As exported variables may not
    start with double underscores (which the parser asserts) this is not a
    problem for regular Jinja code, but if this node is used in an extension
    extra care must be taken.

    The list of names may contain tuples if aliases are wanted.
    """
    fields = ('template', 'names', 'with_context')


class ExprStmt(Stmt):
    """A statement that evaluates an expression and discards the result."""
    fields = ('node',)


class Assign(Stmt):
    """Assigns an expression to a target."""
    fields = ('target', 'node')


class AssignBlock(Stmt):
    """Assigns a block to a target."""
    fields = ('target', 'body')
class Expr(Node):
    """Baseclass for all expressions."""
    abstract = True

    def as_const(self, eval_ctx=None):
        """Return the value of the expression as constant or raise
        :exc:`Impossible` if this was not possible.

        An :class:`EvalContext` can be provided, if none is given
        a default context is created which requires the nodes to have
        an attached environment.

        .. versionchanged:: 2.4
           the `eval_ctx` parameter was added.
        """
        raise Impossible()

    def can_assign(self):
        """Check if it's possible to assign something to this node."""
        return False
class BinExpr(Expr):
    """Baseclass for all binary expressions."""
    fields = ('left', 'right')
    operator = None
    abstract = True

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # intercepted operators cannot be folded at compile time
        if self.environment.sandboxed and \
           self.operator in self.environment.intercepted_binops:
            raise Impossible()
        f = _binop_to_func[self.operator]
        try:
            return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
        except Exception:
            # Any evaluation failure simply means "not a constant".
            raise Impossible()
class UnaryExpr(Expr):
    """Baseclass for all unary expressions."""
    fields = ('node',)
    operator = None
    abstract = True

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # intercepted operators cannot be folded at compile time
        if self.environment.sandboxed and \
           self.operator in self.environment.intercepted_unops:
            raise Impossible()
        f = _uaop_to_func[self.operator]
        try:
            return f(self.node.as_const(eval_ctx))
        except Exception:
            # Any evaluation failure simply means "not a constant".
            raise Impossible()
class Name(Expr):
    """Looks up a name or stores a value in a name.
    The `ctx` of the node can be one of the following values:

    -   `store`: store a value in the name
    -   `load`: load that name
    -   `param`: like `store` but if the name was defined as function parameter.
    """
    fields = ('name', 'ctx')

    def can_assign(self):
        # The keyword-like constants may never be assignment targets.
        forbidden = ('true', 'false', 'none', 'True', 'False', 'None')
        return self.name not in forbidden
class Literal(Expr):
    """Baseclass for literals."""
    abstract = True


class Const(Literal):
    """All constant values.  The parser will return this node for simple
    constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too.  Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true).
    """
    fields = ('value',)

    def as_const(self, eval_ctx=None):
        return self.value

    @classmethod
    def from_untrusted(cls, value, lineno=None, environment=None):
        """Return a const object if the value is representable as
        constant value in the generated code, otherwise it will raise
        an `Impossible` exception.
        """
        from .compiler import has_safe_repr
        if not has_safe_repr(value):
            raise Impossible()
        return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
    """A constant template string."""
    fields = ('data',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        # Under autoescaping, literal template data is already safe markup.
        if eval_ctx.autoescape:
            return Markup(self.data)
        return self.data
class Tuple(Literal):
    """For loop unpacking and some other things like multiple arguments
    for subscripts.  Like for :class:`Name` `ctx` specifies if the tuple
    is used for loading the names or storing.
    """
    fields = ('items', 'ctx')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return tuple(x.as_const(eval_ctx) for x in self.items)

    def can_assign(self):
        # Assignable only if every element is itself assignable.
        for item in self.items:
            if not item.can_assign():
                return False
        return True


class List(Literal):
    """Any list literal such as ``[1, 2, 3]``"""
    fields = ('items',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return [x.as_const(eval_ctx) for x in self.items]


class Dict(Literal):
    """Any dict literal such as ``{1: 2, 3: 4}``.  The items must be a list of
    :class:`Pair` nodes.
    """
    fields = ('items',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
    """A key, value pair for dicts."""
    fields = ('key', 'value')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)


class Keyword(Helper):
    """A key, value pair for keyword arguments where key is a string."""
    fields = ('key', 'value')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # The key is a plain string, only the value needs folding.
        return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
    """A conditional expression (inline if expression).  (``{{
    foo if bar else baz }}``)
    """
    fields = ('test', 'expr1', 'expr2')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if self.test.as_const(eval_ctx):
            return self.expr1.as_const(eval_ctx)

        # if we evaluate to an undefined object, we better do that at runtime
        if self.expr2 is None:
            raise Impossible()

        return self.expr2.as_const(eval_ctx)
class Filter(Expr):
    """This node applies a filter on an expression.  `name` is the name of
    the filter, the rest of the fields are the same as for :class:`Call`.

    If the `node` of a filter is `None` the contents of the last buffer are
    filtered.  Buffers are created by macros and filter blocks.
    """
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile or self.node is None:
            raise Impossible()
        # we have to be careful here because we call filter_ below.
        # if this variable would be called filter, 2to3 would wrap the
        # call in a list beause it is assuming we are talking about the
        # builtin filter function here which no longer returns a list in
        # python 3.  because of that, do not rename filter_ to filter!
        filter_ = self.environment.filters.get(self.name)
        # Context filters need the runtime context -> not foldable.
        if filter_ is None or getattr(filter_, 'contextfilter', False):
            raise Impossible()
        obj = self.node.as_const(eval_ctx)
        args = [x.as_const(eval_ctx) for x in self.args]
        if getattr(filter_, 'evalcontextfilter', False):
            args.insert(0, eval_ctx)
        elif getattr(filter_, 'environmentfilter', False):
            args.insert(0, self.environment)
        kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return filter_(obj, *args, **kwargs)
        except Exception:
            raise Impossible()
class Test(Expr):
    """Applies a test on an expression.  `name` is the name of the test, the
    rest of the fields are the same as for :class:`Call`.
    """
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
    """Calls an expression.  `args` is a list of arguments, `kwargs` a list
    of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
    and `dyn_kwargs` has to be either `None` or a node that is used as
    node for dynamic positional (``*args``) or keyword (``**kwargs``)
    arguments.
    """
    fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # Sandboxed environments must route every call through the
        # sandbox at runtime, so no compile-time folding there.
        if eval_ctx.volatile or eval_ctx.environment.sandboxed:
            raise Impossible()
        obj = self.node.as_const(eval_ctx)

        # don't evaluate context functions
        args = [x.as_const(eval_ctx) for x in self.args]
        if isinstance(obj, _context_function_types):
            if getattr(obj, 'contextfunction', False):
                raise Impossible()
            elif getattr(obj, 'evalcontextfunction', False):
                args.insert(0, eval_ctx)
            elif getattr(obj, 'environmentfunction', False):
                args.insert(0, self.environment)

        kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return obj(*args, **kwargs)
        except Exception:
            raise Impossible()
class Getitem(Expr):
    """Get an attribute or item from an expression and prefer the item."""
    fields = ('node', 'arg', 'ctx')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if self.ctx != 'load':
            raise Impossible()
        try:
            # Delegates to the environment so sandboxes can intercept.
            return self.environment.getitem(self.node.as_const(eval_ctx),
                                            self.arg.as_const(eval_ctx))
        except Exception:
            raise Impossible()

    def can_assign(self):
        return False


class Getattr(Expr):
    """Get an attribute or item from an expression that is a ascii-only
    bytestring and prefer the attribute.
    """
    fields = ('node', 'attr', 'ctx')

    def as_const(self, eval_ctx=None):
        if self.ctx != 'load':
            raise Impossible()
        try:
            eval_ctx = get_eval_context(self, eval_ctx)
            # Delegates to the environment so sandboxes can intercept.
            return self.environment.getattr(self.node.as_const(eval_ctx),
                                            self.attr)
        except Exception:
            raise Impossible()

    def can_assign(self):
        return False
class Slice(Expr):
    """Represents a slice object.  This must only be used as argument for
    :class:`Subscript`.
    """
    fields = ('start', 'stop', 'step')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)

        def const(obj):
            # Missing slice parts stay None, matching Python slices.
            if obj is None:
                return None
            return obj.as_const(eval_ctx)

        return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
    """Concatenates the list of expressions provided after converting them to
    unicode.
    """
    fields = ('nodes',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
    """Compares an expression with some other expressions.  `ops` must be a
    list of :class:`Operand`\s.
    """
    fields = ('expr', 'ops')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        result = value = self.expr.as_const(eval_ctx)
        try:
            # Chained comparisons (a < b < c): each step compares the
            # previous operand's value against the next one.
            for op in self.ops:
                new_value = op.expr.as_const(eval_ctx)
                result = _cmpop_to_func[op.op](value, new_value)
                value = new_value
        except Exception:
            raise Impossible()
        return result
class Operand(Helper):
    """Holds an operator and an expression."""
    fields = ('op', 'expr')

# Append the operator list to the docstring only in non-optimized runs
# (docstrings are stripped under ``python -OO``).
if __debug__:
    Operand.__doc__ += '\nThe following operators are available: ' + \
        ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
                  set(_uaop_to_func) | set(_cmpop_to_func)))
# Concrete operator nodes: each only pins the ``operator`` token that the
# BinExpr/UnaryExpr folding machinery looks up in the tables above.

class Mul(BinExpr):
    """Multiplies the left with the right node."""
    operator = '*'


class Div(BinExpr):
    """Divides the left by the right node."""
    operator = '/'


class FloorDiv(BinExpr):
    """Divides the left by the right node and truncates conver the
    result into an integer by truncating.
    """
    operator = '//'


class Add(BinExpr):
    """Add the left to the right node."""
    operator = '+'


class Sub(BinExpr):
    """Subtract the right from the left node."""
    operator = '-'


class Mod(BinExpr):
    """Left modulo right."""
    operator = '%'


class Pow(BinExpr):
    """Left to the power of right."""
    operator = '**'


class And(BinExpr):
    """Short circuited AND."""
    operator = 'and'

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)


class Or(BinExpr):
    """Short circuited OR."""
    operator = 'or'

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)


class Not(UnaryExpr):
    """Negate the expression."""
    operator = 'not'


class Neg(UnaryExpr):
    """Make the expression negative."""
    operator = '-'


class Pos(UnaryExpr):
    """Make the expression positive (noop for most expressions)"""
    operator = '+'
# Helpers for extensions


class EnvironmentAttribute(Expr):
    """Loads an attribute from the environment object.  This is useful for
    extensions that want to call a callback stored on the environment.
    """
    fields = ('name',)


class ExtensionAttribute(Expr):
    """Returns the attribute of an extension bound to the environment.
    The identifier is the identifier of the :class:`Extension`.

    This node is usually constructed by calling the
    :meth:`~jinja2.ext.Extension.attr` method on an extension.
    """
    fields = ('identifier', 'name')


class ImportedName(Expr):
    """If created with an import name the import name is returned on node
    access.  For example ``ImportedName('cgi.escape')`` returns the `escape`
    function from the cgi module on evaluation.  Imports are optimized by the
    compiler so there is no need to assign them to local variables.
    """
    fields = ('importname',)


class InternalName(Expr):
    """An internal name in the compiler.  You cannot create these nodes
    yourself but the parser provides a
    :meth:`~jinja2.parser.Parser.free_identifier` method that creates
    a new identifier for you.  This identifier is not available from the
    template and is not threated specially by the compiler.
    """
    fields = ('name',)

    def __init__(self):
        # Deliberately uninstantiable; see the class docstring.
        raise TypeError('Can\'t create internal names.  Use the '
                        '`free_identifier` method on a parser.')


class MarkSafe(Expr):
    """Mark the wrapped expression as safe (wrap it as `Markup`)."""
    fields = ('expr',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return Markup(self.expr.as_const(eval_ctx))


class MarkSafeIfAutoescape(Expr):
    """Mark the wrapped expression as safe (wrap it as `Markup`) but
    only if autoescaping is active.

    .. versionadded:: 2.5
    """
    fields = ('expr',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        expr = self.expr.as_const(eval_ctx)
        if eval_ctx.autoescape:
            return Markup(expr)
        return expr
class ContextReference(Expr):
    """Returns the current template context.  It can be used like a
    :class:`Name` node, with a ``'load'`` ctx and will return the
    current :class:`~jinja2.runtime.Context` object.

    Here an example that assigns the current template name to a
    variable named `foo`::

        Assign(Name('foo', ctx='store'),
               Getattr(ContextReference(), 'name'))
    """


class Continue(Stmt):
    """Continue a loop."""


class Break(Stmt):
    """Break a loop."""


class Scope(Stmt):
    """An artificial scope."""
    fields = ('body',)


class EvalContextModifier(Stmt):
    """Modifies the eval context.  For each option that should be modified,
    a :class:`Keyword` has to be added to the :attr:`options` list.

    Example to change the `autoescape` setting::

        EvalContextModifier(options=[Keyword('autoescape', Const(True))])
    """
    fields = ('options',)


class ScopedEvalContextModifier(EvalContextModifier):
    """Modifies the eval context and reverts it later.  Works exactly like
    :class:`EvalContextModifier` but will only modify the
    :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
    """
    fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
    raise TypeError('can\'t create custom node types')

# Seal the metaclass so extensions cannot declare new node classes,
# then drop the helper name from the module namespace.
NodeType.__new__ = staticmethod(_failing_new)
del _failing_new
| ./CrossVul/dataset_final_sorted/CWE-134/py/good_4885_0 |
crossvul-python_data_bad_4805_0 | #!/usr/bin/env python
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
# Runtime dependencies.  ``defusedxml`` is required because pysaml2
# parses attacker-supplied XML (SAML requests/responses/metadata); the
# hardened parsers block external-entity (XXE) and entity-expansion
# attacks (CWE-611).
install_requires = [
    # core dependencies
    'decorator',
    'requests >= 1.0.0',
    'future',
    'paste',
    'zope.interface',
    'repoze.who',
    'pycryptodomex',
    'pytz',
    'pyOpenSSL',
    'python-dateutil',
    'defusedxml',
    'six'
]

# Single-source the package version from src/saml2/__init__.py.
version = ''
with open('src/saml2/__init__.py', 'r') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

setup(
    name='pysaml2',
    version=version,
    description='Python implementation of SAML Version 2',
    # long_description = read("README"),
    author='Roland Hedberg',
    author_email='roland.hedberg@adm.umu.se',
    license='Apache 2.0',
    url='https://github.com/rohe/pysaml2',
    packages=['saml2', 'saml2/xmldsig', 'saml2/xmlenc', 'saml2/s2repoze',
              'saml2/s2repoze.plugins', "saml2/profile", "saml2/schema",
              "saml2/extension", "saml2/attributemaps", "saml2/authn_context",
              "saml2/entity_category", "saml2/userinfo", "saml2/ws"],
    package_dir={'': 'src'},
    package_data={'': ['xml/*.xml']},
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5"
    ],
    scripts=["tools/parse_xsd2.py", "tools/make_metadata.py",
             "tools/mdexport.py", "tools/merge_metadata.py"],
    install_requires=install_requires,
    zip_safe=False,
)
| ./CrossVul/dataset_final_sorted/CWE-611/py/bad_4805_0 |
crossvul-python_data_good_4805_0 | #!/usr/bin/env python
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
# Runtime dependencies.  ``defusedxml`` guards the XML parsers against
# external-entity (XXE) and entity-expansion attacks on untrusted SAML
# documents (CWE-611).
install_requires = [
    # core dependencies
    'decorator',
    'requests >= 1.0.0',
    'future',
    'paste',
    'zope.interface',
    'repoze.who',
    'pycryptodomex',
    'pytz',
    'pyOpenSSL',
    'python-dateutil',
    'defusedxml',
    'six'
]

# Single-source the package version from src/saml2/__init__.py.
version = ''
with open('src/saml2/__init__.py', 'r') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

setup(
    name='pysaml2',
    version=version,
    description='Python implementation of SAML Version 2',
    # long_description = read("README"),
    author='Roland Hedberg',
    author_email='roland.hedberg@adm.umu.se',
    license='Apache 2.0',
    url='https://github.com/rohe/pysaml2',
    packages=['saml2', 'saml2/xmldsig', 'saml2/xmlenc', 'saml2/s2repoze',
              'saml2/s2repoze.plugins', "saml2/profile", "saml2/schema",
              "saml2/extension", "saml2/attributemaps", "saml2/authn_context",
              "saml2/entity_category", "saml2/userinfo", "saml2/ws"],
    package_dir={'': 'src'},
    package_data={'': ['xml/*.xml']},
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5"
    ],
    scripts=["tools/parse_xsd2.py", "tools/make_metadata.py",
             "tools/mdexport.py", "tools/merge_metadata.py"],
    install_requires=install_requires,
    zip_safe=False,
)
crossvul-python_data_bad_4805_3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
Suppport for the client part of the SAML2.0 SOAP binding.
"""
import logging
from saml2 import create_class_from_element_tree
from saml2.samlp import NAMESPACE as SAMLP_NAMESPACE
from saml2.schema import soapenv
# SECURITY NOTE(review): these parsers handle attacker-controlled SAML
# XML but provide no protection against entity-expansion / external
# entity (XXE, CWE-611) tricks on older Pythons.  The parsing should go
# through defusedxml instead -- flagged here rather than silently
# swapped, since defusedxml is an extra dependency.
try:
    from xml.etree import cElementTree as ElementTree
except ImportError:
    try:
        import cElementTree as ElementTree
    except ImportError:
        #noinspection PyUnresolvedReferences
        from elementtree import ElementTree

logger = logging.getLogger(__name__)
class XmlParseError(Exception):
    """Raised when a SOAP envelope cannot be parsed as XML."""


class WrongMessageType(Exception):
    """Raised when the SOAP body carries a different SAML message type
    than the caller expected."""
# Thin wrappers around parse_soap_enveloped_saml_thingy: each one pins
# the SAML protocol element tag(s) that may legally appear in the SOAP
# body for that message type.

def parse_soap_enveloped_saml_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE,
            '{%s}LogoutResponse' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


def parse_soap_enveloped_saml_logout_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE,
            '{%s}LogoutResponse' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


def parse_soap_enveloped_saml_attribute_query(text):
    expected_tag = '{%s}AttributeQuery' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_attribute_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE,
            '{%s}AttributeResponse' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


def parse_soap_enveloped_saml_logout_request(text):
    expected_tag = '{%s}LogoutRequest' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_authn_request(text):
    expected_tag = '{%s}AuthnRequest' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_artifact_resolve(text):
    expected_tag = '{%s}ArtifactResolve' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_artifact_response(text):
    expected_tag = '{%s}ArtifactResponse' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_name_id_mapping_request(text):
    expected_tag = '{%s}NameIDMappingRequest' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_name_id_mapping_response(text):
    expected_tag = '{%s}NameIDMappingResponse' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_manage_name_id_request(text):
    expected_tag = '{%s}ManageNameIDRequest' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_manage_name_id_response(text):
    expected_tag = '{%s}ManageNameIDResponse' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_assertion_id_request(text):
    expected_tag = '{%s}AssertionIDRequest' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_assertion_id_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE,
            '{%s}AssertionIDResponse' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


def parse_soap_enveloped_saml_authn_query(text):
    expected_tag = '{%s}AuthnQuery' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_authn_query_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


def parse_soap_enveloped_saml_authn_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


#def parse_soap_enveloped_saml_logout_response(text):
#    expected_tag = '{%s}LogoutResponse' % SAMLP_NAMESPACE
#    return parse_soap_enveloped_saml_thingy(text, [expected_tag])
def parse_soap_enveloped_saml_thingy(text, expected_tags):
    """Parses a SOAP enveloped SAML thing and returns the thing as
    a string.

    :param text: The SOAP object as XML string
    :param expected_tags: What the tag of the SAML thingy is expected to be.
    :return: SAML thingy as a string, or "" if the envelope has no Body
    :raises XmlParseError: if *text* is not a SOAP Envelope of the
        expected shape
    :raises WrongMessageType: if the body payload is not one of
        *expected_tags*
    """
    # SECURITY NOTE(review): *text* is attacker-controlled wire data and
    # xml.etree offers no XXE / entity-expansion protection on older
    # Pythons; parsing should go through defusedxml where available.
    envelope = ElementTree.fromstring(text)

    # These checks used to be bare ``assert`` statements, which are
    # stripped under ``python -O`` and would let malformed envelopes
    # through unvalidated; raise explicitly instead.
    if envelope.tag != '{%s}Envelope' % soapenv.NAMESPACE:
        raise XmlParseError("Not a SOAP Envelope: %s" % envelope.tag)
    if len(envelope) < 1:
        raise XmlParseError("Empty SOAP Envelope")

    body = None
    for part in envelope:
        if part.tag == '{%s}Body' % soapenv.NAMESPACE:
            if len(part) != 1:
                raise XmlParseError("Expected a single element in SOAP Body")
            body = part
            break

    if body is None:
        return ""

    saml_part = body[0]
    if saml_part.tag in expected_tags:
        return ElementTree.tostring(saml_part, encoding="UTF-8")
    else:
        raise WrongMessageType("Was '%s' expected one of %s" % (saml_part.tag,
                                                                expected_tags))
import re

# Splits a Clark-notation tag "{namespace}local" into its two parts.
# Raw string avoids the invalid "\{" escape sequence that triggers a
# DeprecationWarning (and eventually a SyntaxError) on Python 3.6+.
NS_AND_TAG = re.compile(r"\{([^}]+)\}(.*)")
def instanciate_class(item, modules):
    """Instantiate the saml2 class registered for *item*'s namespace/tag
    pair in one of *modules* (xsd-schema modules with ``NAMESPACE`` and
    ``ELEMENT_BY_TAG`` lookup tables).

    ("instanciate" [sic] is the established public name; keep it.)
    """
    m = NS_AND_TAG.match(item.tag)
    ns, tag = m.groups()
    for module in modules:
        if module.NAMESPACE == ns:
            try:
                target = module.ELEMENT_BY_TAG[tag]
                return create_class_from_element_tree(target, item)
            except KeyError:
                # Same namespace but unknown tag: try the next module.
                continue

    raise Exception("Unknown class: ns='%s', tag='%s'" % (ns, tag))
def class_instances_from_soap_enveloped_saml_thingies(text, modules):
    """Parses a SOAP enveloped header and body SAML thing and returns the
    thing as a dictionary class instance.

    :param text: The SOAP object as XML
    :param modules: modules representing xsd schemas
    :return: The body and headers as class instances
    """
    # SECURITY NOTE(review): *text* is untrusted wire data parsed with
    # xml.etree; prefer defusedxml to guard against XXE (CWE-611).
    try:
        envelope = ElementTree.fromstring(text)
    except Exception as exc:
        raise XmlParseError("%s" % exc)

    # NOTE(review): bare asserts vanish under ``python -O``; malformed
    # envelopes would then pass through unchecked.
    assert envelope.tag == '{%s}Envelope' % soapenv.NAMESPACE
    assert len(envelope) >= 1
    env = {"header": [], "body": None}

    for part in envelope:
        if part.tag == '{%s}Body' % soapenv.NAMESPACE:
            assert len(part) == 1
            # The single Body child is the actual SAML request/response.
            env["body"] = instanciate_class(part[0], modules)
        elif part.tag == "{%s}Header" % soapenv.NAMESPACE:
            for item in part:
                env["header"].append(instanciate_class(item, modules))

    return env
def open_soap_envelope(text):
    """Split a SOAP message into its serialized header and body parts.

    :param text: SOAP message
    :return: dictionary with two keys "body"/"header"
    :raises XmlParseError: if *text* is not well-formed XML
    """
    try:
        envelope = ElementTree.fromstring(text)
    except Exception as exc:
        raise XmlParseError("%s" % exc)

    assert envelope.tag == '{%s}Envelope' % soapenv.NAMESPACE
    assert len(envelope) >= 1

    body_tag = '{%s}Body' % soapenv.NAMESPACE
    header_tag = "{%s}Header" % soapenv.NAMESPACE
    content = {"header": [], "body": None}
    for element in envelope:
        if element.tag == body_tag:
            assert len(element) == 1
            content["body"] = ElementTree.tostring(element[0], encoding="UTF-8")
        elif element.tag == header_tag:
            content["header"].extend(
                ElementTree.tostring(child, encoding="UTF-8")
                for child in element)
    return content
def make_soap_enveloped_saml_thingy(thingy, headers=None):
    """Return a SOAP envelope wrapping a SAML request, as a text string.

    :param thingy: The SAML thingy
    :param headers: optional SOAP header elements
    :return: The SOAP envelope as a string
    """
    envelope = soapenv.Envelope()
    if headers:
        hdr = soapenv.Header()
        hdr.add_extension_elements(headers)
        envelope.header = hdr
    envelope.body = soapenv.Body()
    envelope.body.add_extension_element(thingy)
    return "%s" % envelope
def soap_fault(message=None, actor=None, code=None, detail=None):
    """Create a SOAP Fault message.

    :param message: Human readable error message
    :param actor: Who discovered the error
    :param code: Error code
    :param detail: More specific error message
    :return: A SOAP Fault message as a string
    """
    # Each sub-element is only created when the corresponding value is given.
    fault = soapenv.Fault(
        faultcode=soapenv.Fault_faultcode(text=code) if code else None,
        faultstring=soapenv.Fault_faultstring(text=message) if message else None,
        faultactor=soapenv.Fault_faultactor(text=actor) if actor else None,
        detail=soapenv.Fault_detail(text=detail) if detail else None,
    )
    return "%s" % fault
| ./CrossVul/dataset_final_sorted/CWE-611/py/bad_4805_3 |
crossvul-python_data_bad_4805_2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""Contains classes and functions that are necessary to implement
different bindings.
Bindings normally consists of three parts:
- rules about what to send
- how to package the information
- which protocol to use
"""
from six.moves.urllib.parse import urlparse, urlencode
import saml2
import base64
from saml2.s_utils import deflate_and_base64_encode
from saml2.s_utils import Unsupported
import logging
from saml2.sigver import REQ_ORDER
from saml2.sigver import RESP_ORDER
from saml2.sigver import SIGNER_ALGS
import six
from saml2.xmldsig import SIG_ALLOWED_ALG
logger = logging.getLogger(__name__)
try:
from xml.etree import cElementTree as ElementTree
if ElementTree.VERSION < '1.3.0':
# cElementTree has no support for register_namespace
# neither _namespace_map, thus we sacrify performance
# for correctness
from xml.etree import ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
from elementtree import ElementTree
# SOAP 1.1 envelope namespace.
NAMESPACE = "http://schemas.xmlsoap.org/soap/envelope/"
# Template for the auto-submitting HTML form used by the HTTP-POST binding;
# filled with (action URL, parameter name, base64-encoded message, relay state).
FORM_SPEC = """<form method="post" action="%s">
<input type="hidden" name="%s" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
<input type="submit" value="Submit" />
</form>"""
def http_form_post_message(message, location, relay_state="",
                           typ="SAMLRequest", **kwargs):
    """Pack a SAML message for the HTTP-POST binding.

    The HTTP POST binding transmits SAML protocol messages within the
    base64-encoded content of an HTML form control.

    :param message: The message
    :param location: Where the form should be posted to
    :param relay_state: for preserving and conveying state information
    :return: A dict with header information and an HTML message.
    """
    if not isinstance(message, six.string_types):
        message = str(message)
    if not isinstance(message, six.binary_type):
        message = message.encode('utf-8')

    if typ in ("SAMLRequest", "SAMLResponse"):
        _msg = base64.b64encode(message)
    else:
        _msg = message
    _msg = _msg.decode('ascii')

    # NOTE(review): location/relay_state are interpolated without HTML
    # escaping -- verify the caller sanitizes them.
    response = [
        "<head>",
        """<title>SAML 2.0 POST</title>""",
        "</head><body>",
        FORM_SPEC % (location, typ, _msg, relay_state),
        """<script type="text/javascript">""",
        " window.onload = function ()",
        " { document.forms[0].submit(); }",
        """</script>""",
        "</body>",
    ]
    return {"headers": [("Content-type", "text/html")], "data": response}
def http_post_message(message, relay_state="", typ="SAMLRequest", **kwargs):
    """Pack a SAML message as form-urlencoded POST data.

    :param message: The message
    :param relay_state: for preserving and conveying state information
    :return: A dict with header information and the urlencoded message.
    """
    if not isinstance(message, six.string_types):
        message = str(message)
    if not isinstance(message, six.binary_type):
        message = message.encode('utf-8')

    if typ in ("SAMLRequest", "SAMLResponse"):
        _msg = base64.b64encode(message)
    else:
        _msg = message
    _msg = _msg.decode('ascii')

    part = {typ: _msg}
    if relay_state:
        part["RelayState"] = relay_state

    return {"headers": [("Content-type", 'application/x-www-form-urlencoded')],
            "data": urlencode(part)}
def http_redirect_message(message, location, relay_state="", typ="SAMLRequest",
                          sigalg='', signer=None, **kwargs):
    """The HTTP Redirect binding defines a mechanism by which SAML protocol
    messages can be transmitted within URL parameters.
    Messages are encoded for use with this binding using a URL encoding
    technique, and transmitted using the HTTP GET method.
    The DEFLATE Encoding is used in this function.

    :param message: The message
    :param location: Where the message should be posted to
    :param relay_state: for preserving and conveying state information
    :param typ: What type of message it is SAMLRequest/SAMLResponse/SAMLart
    :param sigalg: Which algorithm the signature function will use to sign
        the message
    :param signer: A signature function that can be used to sign the message
    :return: A dict with the redirect "headers" (Location) and empty "data".
    """
    if not isinstance(message, six.string_types):
        message = "%s" % (message,)
    _order = None
    if typ in ["SAMLRequest", "SAMLResponse"]:
        if typ == "SAMLRequest":
            _order = REQ_ORDER
        else:
            _order = RESP_ORDER
        # The Redirect binding requires DEFLATE + base64 encoding.
        args = {typ: deflate_and_base64_encode(message)}
    elif typ == "SAMLart":
        args = {typ: message}
    else:
        raise Exception("Unknown message type: %s" % typ)
    if relay_state:
        args["RelayState"] = relay_state
    if signer:
        # sigalgs, should be one defined in xmldsig
        # NOTE(review): validating via assert is stripped under `python -O`.
        assert sigalg in [b for a, b in SIG_ALLOWED_ALG]
        args["SigAlg"] = sigalg
        # Sign the query string with parameters in the spec-mandated order;
        # SigAlg is covered by the signature, Signature itself is not.
        string = "&".join([urlencode({k: args[k]})
                           for k in _order if k in args]).encode('ascii')
        args["Signature"] = base64.b64encode(signer.sign(string))
        string = urlencode(args)
    else:
        string = urlencode(args)
    # Append to any query component already present in *location*.
    glue_char = "&" if urlparse(location).query else "?"
    login_url = glue_char.join([location, string])
    headers = [('Location', str(login_url))]
    body = []
    return {"headers": headers, "data": body}
# Placeholder namespace used by make_soap_enveloped_saml_thingy to mark the
# spot where a pre-serialized (possibly signed) XML string is spliced in.
DUMMY_NAMESPACE = "http://example.org/"
# XML declaration stripped from pre-serialized thingies before embedding.
PREFIX = '<?xml version="1.0" encoding="UTF-8"?>'
def make_soap_enveloped_saml_thingy(thingy, header_parts=None):
    """ Returns a soap envelope containing a SAML request
    as a text string.

    :param thingy: The SAML thingy, either an element instance or an
        already serialized XML string (which may carry a signature)
    :param header_parts: optional SOAP header elements
    :return: The SOAP envelope as a string
    """
    envelope = ElementTree.Element('')
    envelope.tag = '{%s}Envelope' % NAMESPACE

    if header_parts:
        header = ElementTree.Element('')
        header.tag = '{%s}Header' % NAMESPACE
        envelope.append(header)
        for part in header_parts:
            # This doesn't work if the headers are signed
            part.become_child_element_of(header)

    body = ElementTree.Element('')
    body.tag = '{%s}Body' % NAMESPACE
    envelope.append(body)

    if isinstance(thingy, six.string_types):
        # Pre-serialized XML (e.g. already signed) must be spliced in
        # textually so its bytes stay intact.
        # remove the first XML version/encoding line
        if thingy[0:5].lower() == '<?xml':
            logger.debug("thingy0: %s", thingy)
            _part = thingy.split("\n")
            thingy = "".join(_part[1:])
        thingy = thingy.replace(PREFIX, "")
        logger.debug("thingy: %s", thingy)
        # Insert a placeholder element in the dummy namespace; after
        # serializing the envelope, the placeholder and the dummy namespace
        # declaration are located in the text and replaced by the thingy.
        _child = ElementTree.Element('')
        _child.tag = '{%s}FuddleMuddle' % DUMMY_NAMESPACE
        body.append(_child)
        _str = ElementTree.tostring(envelope, encoding="UTF-8")
        if isinstance(_str, six.binary_type):
            _str = _str.decode('utf-8')
        logger.debug("SOAP precursor: %s", _str)
        # find an remove the namespace definition
        i = _str.find(DUMMY_NAMESPACE)
        j = _str.rfind("xmlns:", 0, i)
        cut1 = _str[j:i + len(DUMMY_NAMESPACE) + 1]
        _str = _str.replace(cut1, "")
        # cut1[6:9] is the auto-generated namespace prefix (e.g. "ns0");
        # NOTE(review): this assumes a 3-character prefix -- verify.
        first = _str.find("<%s:FuddleMuddle" % (cut1[6:9],))
        last = _str.find(">", first + 14)
        cut2 = _str[first:last + 1]
        return _str.replace(cut2, thingy)
    else:
        thingy.become_child_element_of(body)
        return ElementTree.tostring(envelope, encoding="UTF-8")
def http_soap_message(message):
    """Pack *message* in a SOAP envelope for the SOAP binding."""
    data = make_soap_enveloped_saml_thingy(message)
    return {"headers": [("Content-type", "application/soap+xml")],
            "data": data}
def http_paos(message, extra=None):
    """Pack *message* (plus optional *extra* header parts) for PAOS."""
    data = make_soap_enveloped_saml_thingy(message, extra)
    return {"headers": [("Content-type", "application/soap+xml")],
            "data": data}
def parse_soap_enveloped_saml(text, body_class, header_class=None):
    """Parses a SOAP enveloped SAML thing and returns header parts and body.

    :param text: The SOAP object as XML
    :param body_class: the class the Body element is expected to be
    :param header_class: iterable of classes allowed in the Header
    :return: header parts and body as saml.samlbase instances
    """
    # SECURITY NOTE(review): xml.etree offers no protection against entity
    # expansion or external entity (XXE) attacks and *text* typically comes
    # from a remote peer; parsing with defusedxml would be safer -- TODO.
    envelope = ElementTree.fromstring(text)
    assert envelope.tag == '{%s}Envelope' % NAMESPACE
    # print(len(envelope))
    body = None
    header = {}
    for part in envelope:
        # print(">",part.tag)
        if part.tag == '{%s}Body' % NAMESPACE:
            for sub in part:
                try:
                    body = saml2.create_class_from_element_tree(body_class, sub)
                except Exception:
                    raise Exception(
                        "Wrong body type (%s) in SOAP envelope" % sub.tag)
        elif part.tag == '{%s}Header' % NAMESPACE:
            if not header_class:
                raise Exception("Header where I didn't expect one")
            # print("--- HEADER ---")
            for sub in part:
                # print(">>",sub.tag)
                # Match the header element against the allowed classes.
                for klass in header_class:
                    # print("?{%s}%s" % (klass.c_namespace,klass.c_tag))
                    if sub.tag == "{%s}%s" % (klass.c_namespace, klass.c_tag):
                        header[sub.tag] = \
                            saml2.create_class_from_element_tree(klass, sub)
                        break
    return body, header
# -----------------------------------------------------------------------------

# Maps a binding identifier to the function that packs a message for it.
PACKING = {
    saml2.BINDING_HTTP_REDIRECT: http_redirect_message,
    saml2.BINDING_HTTP_POST: http_form_post_message,
}
def packager(identifier):
    """Return the packing function registered for *identifier*.

    :raises Exception: if the binding type is unknown
    """
    func = PACKING.get(identifier)
    if func is None:
        raise Exception("Unknown binding type: %s" % identifier)
    return func
def factory(binding, message, location, relay_state="", typ="SAMLRequest",
            **kwargs):
    """Pack *message* for the given *binding* in one call."""
    pack = PACKING[binding]
    return pack(message, location, relay_state, typ, **kwargs)
| ./CrossVul/dataset_final_sorted/CWE-611/py/bad_4805_2 |
crossvul-python_data_good_4805_1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains base classes representing SAML elements.
These codes were originally written by Jeffrey Scudder for
representing Saml elements. Takashi Matsuo had added some codes, and
changed some. Roland Hedberg rewrote the whole thing from bottom up so
barely anything but the original structures remained.
Module objective: provide data classes for SAML constructs. These
classes hide the XML-ness of SAML and provide a set of native Python
classes to interact with.
Conversions to and from XML should only be necessary when the SAML classes
"touch the wire" and are sent over HTTP. For this reason this module
provides methods and functions to convert SAML classes to and from strings.
"""
__version__ = "4.4.0"
import logging
import six
from saml2.validate import valid_instance
try:
from xml.etree import cElementTree as ElementTree
if ElementTree.VERSION < '1.3.0':
# cElementTree has no support for register_namespace
# neither _namespace_map, thus we sacrify performance
# for correctness
from xml.etree import ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
from elementtree import ElementTree
import defusedxml.ElementTree
root_logger = logging.getLogger(__name__)
root_logger.level = logging.NOTSET

# Core SAML 2.0 assertion namespace.
NAMESPACE = 'urn:oasis:names:tc:SAML:2.0:assertion'
# TEMPLATE = '{urn:oasis:names:tc:SAML:2.0:assertion}%s'
# XSI_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'

NAMEID_FORMAT_EMAILADDRESS = (
    "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress")

# These are defined in saml2.saml
# NAME_FORMAT_UNSPECIFIED = (
#     "urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified")
# NAME_FORMAT_URI = "urn:oasis:names:tc:SAML:2.0:attrname-format:uri"
# NAME_FORMAT_BASIC = "urn:oasis:names:tc:SAML:2.0:attrname-format:basic"

# Authorization decision values (saml DecisionType).
DECISION_TYPE_PERMIT = "Permit"
DECISION_TYPE_DENY = "Deny"
DECISION_TYPE_INDETERMINATE = "Indeterminate"

# SAML protocol version emitted in messages.
VERSION = "2.0"

# SAML 2.0 binding identifiers.
BINDING_SOAP = 'urn:oasis:names:tc:SAML:2.0:bindings:SOAP'
BINDING_PAOS = 'urn:oasis:names:tc:SAML:2.0:bindings:PAOS'
BINDING_HTTP_REDIRECT = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect'
BINDING_HTTP_POST = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
BINDING_HTTP_ARTIFACT = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Artifact'
BINDING_URI = 'urn:oasis:names:tc:SAML:2.0:bindings:URI'
def class_name(instance):
    """Return the instance's qualified name as "<namespace>:<tag>"."""
    return "{}:{}".format(instance.c_namespace, instance.c_tag)
def create_class_from_xml_string(target_class, xml_string):
    """Creates an instance of the target class from a string.

    :param target_class: The class which will be instantiated and populated
        with the contents of the XML. This class must have a c_tag and a
        c_namespace class variable.
    :param xml_string: A string which contains valid XML. The root element
        of the XML string should match the tag and namespace of the desired
        class.
    :return: An instance of the target class with members assigned according
        to the contents of the XML - or None if the root XML tag and
        namespace did not match those of the target class.
    """
    raw = (xml_string if isinstance(xml_string, six.binary_type)
           else xml_string.encode('utf-8'))
    # defusedxml guards against entity-expansion / external-entity attacks.
    tree = defusedxml.ElementTree.fromstring(raw)
    return create_class_from_element_tree(target_class, tree)
def create_class_from_element_tree(target_class, tree, namespace=None,
                                   tag=None):
    """Instantiate *target_class* and populate it from *tree*.

    Note: Only use this function with classes that have c_namespace and
    c_tag class members.

    :param target_class: The class which will be instantiated and populated
        with the contents of the XML.
    :param tree: An element tree whose contents will be converted into
        members of the new target_class instance.
    :param namespace: The namespace which the XML tree's root node must
        match. Defaults to the c_namespace of the target class.
    :param tag: The tag which the XML tree's root node must match.
        Defaults to the c_tag class member of the target class.
    :return: An instance of the target class - or None if the tag and
        namespace of the XML tree's root node did not match.
    """
    expected_ns = target_class.c_namespace if namespace is None else namespace
    expected_tag = target_class.c_tag if tag is None else tag
    if tree.tag != '{%s}%s' % (expected_ns, expected_tag):
        return None
    instance = target_class()
    instance.harvest_element_tree(tree)
    return instance
class Error(Exception):
    """Base exception class thrown by this module."""
class SAMLError(Exception):
    """Generic SAML-related error."""
class ExtensionElement(object):
    """XML which is not part of the SAML specification,
    these are called extension elements. If a classes parser
    encounters an unexpected XML construct, it is translated into an
    ExtensionElement instance. ExtensionElement is designed to fully
    capture the information in the XML. Child nodes in an XML
    extension are turned into ExtensionElements as well.
    """

    def __init__(self, tag, namespace=None, attributes=None,
                 children=None, text=None):
        """Constructor for ExtensionElement

        :param namespace: The XML namespace for this element.
        :param tag: The tag (without the namespace qualifier) for
            this element. To reconstruct the full qualified name of the
            element, combine this tag with the namespace.
        :param attributes: The attribute value string pairs for the XML
            attributes of this element.
        :param children: list (optional) A list of ExtensionElements which
            represent the XML child nodes of this element.
        :param text: text content of the element (may be None)
        """
        self.namespace = namespace
        self.tag = tag
        self.attributes = attributes or {}
        self.children = children or []
        self.text = text

    def to_string(self):
        """ Serialize the object into a XML string """
        element_tree = self.transfer_to_element_tree()
        return ElementTree.tostring(element_tree, encoding="UTF-8")

    def transfer_to_element_tree(self):
        """Build and return an ElementTree element mirroring this object,
        or None if no tag is set."""
        if self.tag is None:
            return None

        element_tree = ElementTree.Element('')

        # Qualify the tag in Clark notation when a namespace is known.
        if self.namespace is not None:
            element_tree.tag = '{%s}%s' % (self.namespace, self.tag)
        else:
            element_tree.tag = self.tag

        for key, value in iter(self.attributes.items()):
            element_tree.attrib[key] = value

        for child in self.children:
            child.become_child_element_of(element_tree)

        element_tree.text = self.text

        return element_tree

    def become_child_element_of(self, element_tree):
        """Converts this object into an etree element and adds it as a child
        node in an etree element.

        Adds self to the ElementTree. This method is required to avoid
        verbose XML which constantly redefines the namespace.

        :param element_tree: ElementTree._Element The element to which this
            object's XML will be added.
        """
        new_element = self.transfer_to_element_tree()
        element_tree.append(new_element)

    def find_children(self, tag=None, namespace=None):
        """Searches child nodes for objects with the desired tag/namespace.

        Returns a list of extension elements within this object whose tag
        and/or namespace match those passed in. To find all children in
        a particular namespace, specify the namespace but not the tag name.
        If you specify only the tag, the result list may contain extension
        elements in multiple namespaces.

        :param tag: str (optional) The desired tag
        :param namespace: str (optional) The desired namespace
        :return: A list of elements whose tag and/or namespace match the
            parameters values
        """
        results = []

        if tag and namespace:
            for element in self.children:
                if element.tag == tag and element.namespace == namespace:
                    results.append(element)
        elif tag and not namespace:
            for element in self.children:
                if element.tag == tag:
                    results.append(element)
        elif namespace and not tag:
            for element in self.children:
                if element.namespace == namespace:
                    results.append(element)
        else:
            # Neither filter given: return all children.
            for element in self.children:
                results.append(element)

        return results

    def loadd(self, ava):
        """ expects a special set of keys

        Populate this element from a dictionary carrying the keys
        "attributes", "tag", "namespace", "text" and "children".

        :param ava: the dictionary
        :return: the instance itself
        :raises KeyError: if neither *ava* nor the instance provides a tag
            or namespace
        """
        if "attributes" in ava:
            for key, val in ava["attributes"].items():
                self.attributes[key] = val

        try:
            self.tag = ava["tag"]
        except KeyError:
            if not self.tag:
                raise KeyError("ExtensionElement must have a tag")

        try:
            self.namespace = ava["namespace"]
        except KeyError:
            if not self.namespace:
                raise KeyError("ExtensionElement must belong to a namespace")

        try:
            self.text = ava["text"]
        except KeyError:
            pass

        if "children" in ava:
            # Children are loaded recursively from their own dictionaries.
            for item in ava["children"]:
                self.children.append(ExtensionElement(item["tag"]).loadd(item))

        return self
def extension_element_from_string(xml_string):
    """Parse *xml_string* (safely, via defusedxml) into an ExtensionElement."""
    tree = defusedxml.ElementTree.fromstring(xml_string)
    return _extension_element_from_element_tree(tree)
def _extension_element_from_element_tree(element_tree):
    """Recursively convert an ElementTree element into an ExtensionElement."""
    full_tag = element_tree.tag
    if '}' in full_tag:
        # Clark notation "{namespace}tag": split on the first "}".
        head, _sep, tag = full_tag.partition('}')
        namespace = head[1:]
    else:
        namespace = None
        tag = full_tag
    extension = ExtensionElement(namespace=namespace, tag=tag)
    extension.attributes.update(element_tree.attrib)
    extension.children = [_extension_element_from_element_tree(child)
                          for child in element_tree]
    extension.text = element_tree.text
    return extension
class ExtensionContainer(object):
    """Base for objects that carry arbitrary extension elements and
    extension attributes in addition to their declared members."""

    c_tag = ""
    c_namespace = ""

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
        self.encrypted_assertion = None

    # Three methods to create an object from an ElementTree
    def harvest_element_tree(self, tree):
        # Fill in the instance members from the contents of the XML tree.
        for child in tree:
            self._convert_element_tree_to_member(child)
        for attribute, value in iter(tree.attrib.items()):
            self._convert_element_attribute_to_member(attribute, value)
        self.text = tree.text

    def _convert_element_tree_to_member(self, child_tree):
        # Unknown child elements are preserved verbatim as extensions.
        self.extension_elements.append(_extension_element_from_element_tree(
            child_tree))

    def _convert_element_attribute_to_member(self, attribute, value):
        # Unknown attributes are preserved verbatim as extensions.
        self.extension_attributes[attribute] = value

    # One method to create an ElementTree from an object
    def _add_members_to_element_tree(self, tree):
        for child in self.extension_elements:
            child.become_child_element_of(tree)
        for attribute, value in iter(self.extension_attributes.items()):
            tree.attrib[attribute] = value
        tree.text = self.text

    def find_extensions(self, tag=None, namespace=None):
        """Searches extension elements for child nodes with the desired name.

        Returns a list of extension elements within this object whose tag
        and/or namespace match those passed in. To find all extensions in
        a particular namespace, specify the namespace but not the tag name.
        If you specify only the tag, the result list may contain extension
        elements in multiple namespaces.

        :param tag: str (optional) The desired tag
        :param namespace: str (optional) The desired namespace
        :Return: A list of elements whose tag and/or namespace match the
            parameters values
        """
        results = []

        if tag and namespace:
            for element in self.extension_elements:
                if element.tag == tag and element.namespace == namespace:
                    results.append(element)
        elif tag and not namespace:
            for element in self.extension_elements:
                if element.tag == tag:
                    results.append(element)
        elif namespace and not tag:
            for element in self.extension_elements:
                if element.namespace == namespace:
                    results.append(element)
        else:
            # No filter given: return all extension elements.
            for element in self.extension_elements:
                results.append(element)

        return results

    def extensions_as_elements(self, tag, schema):
        """ Return extensions that has the given tag and belongs to the
        given schema as native elements of that schema.

        :param tag: The tag of the element
        :param schema: Which schema the element should originate from
        :return: a list of native elements
        """
        result = []
        for ext in self.find_extensions(tag, schema.NAMESPACE):
            # Round-trip through a string to build the native class instance.
            ets = schema.ELEMENT_FROM_STRING[tag]
            result.append(ets(ext.to_string()))
        return result

    def add_extension_elements(self, items):
        """Append several elements to the extension element list."""
        for item in items:
            self.extension_elements.append(element_to_extension_element(item))

    def add_extension_element(self, item):
        """Append a single element to the extension element list."""
        self.extension_elements.append(element_to_extension_element(item))

    def add_extension_attribute(self, name, value):
        """Record an extension attribute."""
        self.extension_attributes[name] = value
def make_vals(val, klass, klass_inst=None, prop=None, part=False,
              base64encode=False):
    """
    Creates a class instance with a specified value, the specified
    class instance may be a value on a property in a defined class instance.

    :param val: The value
    :param klass: The value class
    :param klass_inst: The class instance which has a property on which
        what this function returns is a value.
    :param prop: The property which the value should be assigned to.
    :param part: If the value is one of a possible list of values it should
        be handled slightly different compared to if it isn't.
    :param base64encode: whether text values should be base64-encoded
    :return: Value class instance
    """
    cinst = None

    # print("make_vals(%s, %s)" % (val, klass))
    if isinstance(val, dict):
        cinst = klass().loadd(val, base64encode=base64encode)
    else:
        try:
            cinst = klass().set_text(val)
        except ValueError:
            # set_text rejects non-scalar values; at the top level assume
            # *val* is a sequence and recurse over its items (part=True).
            if not part:
                cis = [make_vals(sval, klass, klass_inst, prop, True,
                                 base64encode) for sval in val]
                setattr(klass_inst, prop, cis)
            else:
                raise

    if part:
        return cinst
    else:
        # Top-level invocation: attach the (single-element) list to the
        # given property instead of returning it.
        if cinst:
            cis = [cinst]
            setattr(klass_inst, prop, cis)
def make_instance(klass, spec, base64encode=False):
    """Construct a *klass* instance populated from the *spec* dictionary.

    :param klass: The class
    :param spec: Information to be placed in the instance (a dictionary)
    :return: The instance
    """
    instance = klass()
    return instance.loadd(spec, base64encode)
class SamlBase(ExtensionContainer):
    """A foundation class on which SAML classes are built. It
    handles the parsing of attributes and children which are common to all
    SAML classes. By default, the SamlBase class translates all XML child
    nodes into ExtensionElements.
    """
    # Maps '{ns}tag' -> (member_name, member_class) for known child elements.
    c_children = {}
    # Maps XML attribute name -> (member_name, type, required).
    c_attributes = {}
    c_attribute_type = {}
    # Serialization order of child members, when order is significant.
    c_child_order = []
    c_cardinality = {}
    c_any = None
    c_any_attribute = None
    c_value_type = None
    # Optional default prefix/namespace map applied by to_string().
    c_ns_prefix = None
def _get_all_c_children_with_order(self):
if len(self.c_child_order) > 0:
for child in self.c_child_order:
yield child
else:
for _, values in iter(self.__class__.c_children.items()):
yield values[0]
    def _convert_element_tree_to_member(self, child_tree):
        """Convert a child XML element into the matching class member."""
        # Find the element's tag in this class's list of child members
        if child_tree.tag in self.__class__.c_children:
            member_name = self.__class__.c_children[child_tree.tag][0]
            member_class = self.__class__.c_children[child_tree.tag][1]
            # If the class member is supposed to contain a list, make sure
            # the matching member is set to a list, then append the new
            # member instance to the list.
            if isinstance(member_class, list):
                if getattr(self, member_name) is None:
                    setattr(self, member_name, [])
                getattr(self, member_name).append(
                    create_class_from_element_tree(member_class[0], child_tree))
            else:
                setattr(self, member_name,
                        create_class_from_element_tree(member_class,
                                                       child_tree))
        else:
            # Unknown children become generic ExtensionElements.
            ExtensionContainer._convert_element_tree_to_member(self, child_tree)
def _convert_element_attribute_to_member(self, attribute, value):
# Find the attribute in this class's list of attributes.
if attribute in self.__class__.c_attributes:
# Find the member of this class which corresponds to the XML
# attribute(lookup in current_class.c_attributes) and set this
# member to the desired value (using self.__dict__).
setattr(self, self.__class__.c_attributes[attribute][0], value)
else:
# If it doesn't appear in the attribute list it's an extension
ExtensionContainer._convert_element_attribute_to_member(
self, attribute, value)
    # Three methods to create an ElementTree from an object
    def _add_members_to_element_tree(self, tree):
        """Write this instance's members into *tree* as children and
        attributes."""
        # Convert the members of this class which are XML child nodes.
        # This uses the class's c_children dictionary to find the members
        # which should become XML child nodes.
        for member_name in self._get_all_c_children_with_order():
            member = getattr(self, member_name)
            if member is None:
                pass
            elif isinstance(member, list):
                for instance in member:
                    instance.become_child_element_of(tree)
            else:
                member.become_child_element_of(tree)
        # Convert the members of this class which are XML attributes.
        for xml_attribute, attribute_info in \
                iter(self.__class__.c_attributes.items()):
            (member_name, member_type, required) = attribute_info
            member = getattr(self, member_name)
            if member is not None:
                tree.attrib[xml_attribute] = member
        # Lastly, call the ExtensionContainers's _add_members_to_element_tree
        # to convert any extension attributes.
        ExtensionContainer._add_members_to_element_tree(self, tree)
def become_child_element_of(self, node):
"""
Note: Only for use with classes that have a c_tag and c_namespace class
member. It is in SamlBase so that it can be inherited but it should
not be called on instances of SamlBase.
:param node: The node to which this instance should be a child
"""
new_child = self._to_element_tree()
node.append(new_child)
def _to_element_tree(self):
"""
Note, this method is designed to be used only with classes that have a
c_tag and c_namespace. It is placed in SamlBase for inheritance but
should not be called on in this class.
"""
new_tree = ElementTree.Element('{%s}%s' % (self.__class__.c_namespace,
self.__class__.c_tag))
self._add_members_to_element_tree(new_tree)
return new_tree
    def register_prefix(self, nspair):
        """
        Register with ElementTree a set of namespaces

        :param nspair: A dictionary of prefixes and uris to use when
            constructing the text representation.
        :return:
        """
        for prefix, uri in nspair.items():
            try:
                ElementTree.register_namespace(prefix, uri)
            except AttributeError:
                # Backwards compatibility with ET < 1.3
                ElementTree._namespace_map[uri] = prefix
            except ValueError:
                # Reserved prefixes (e.g. "xml") cannot be re-registered;
                # ignore and keep the default mapping.
                pass
def get_ns_map_attribute(self, attributes, uri_set):
for attribute in attributes:
if attribute[0] == "{":
uri, tag = attribute[1:].split("}")
uri_set.add(uri)
return uri_set
def tag_get_uri(self, elem):
if elem.tag[0] == "{":
uri, tag = elem.tag[1:].split("}")
return uri
return None
def get_ns_map(self, elements, uri_set):
for elem in elements:
uri_set = self.get_ns_map_attribute(elem.attrib, uri_set)
uri_set = self.get_ns_map(elem.getchildren(), uri_set)
uri = self.tag_get_uri(elem)
if uri is not None:
uri_set.add(uri)
return uri_set
def get_prefix_map(self, elements):
uri_set = self.get_ns_map(elements, set())
prefix_map = {}
for uri in sorted(uri_set):
prefix_map["encas%d" % len(prefix_map)] = uri
return prefix_map
    def get_xml_string_with_self_contained_assertion_within_advice_encrypted_assertion(
            self, assertion_tag, advice_tag):
        """Serialize, making every Advice/EncryptedAssertion that still
        carries a plain (not yet encrypted) assertion namespace
        self-contained.

        :param assertion_tag: qualified tag of the Assertion element
        :param advice_tag: qualified tag of the Advice element
        :return: the XML document as a str
        """
        for tmp_encrypted_assertion in \
                self.assertion.advice.encrypted_assertion:
            if tmp_encrypted_assertion.encrypted_data is None:
                # Not encrypted yet: rewrite its inner assertion with
                # private ("encasN") prefixes so it stays valid when the
                # subtree is later extracted/encrypted on its own.
                prefix_map = self.get_prefix_map([
                    tmp_encrypted_assertion._to_element_tree().find(
                        assertion_tag)])
                tree = self._to_element_tree()
                encs = tree.find(assertion_tag).find(advice_tag).findall(
                    tmp_encrypted_assertion._to_element_tree().tag)
                for enc in encs:
                    assertion = enc.find(assertion_tag)
                    if assertion is not None:
                        self.set_prefixes(assertion, prefix_map)
        # NOTE(review): `tree` is only bound inside the loop above; if no
        # encrypted_assertion lacks encrypted_data this raises NameError --
        # confirm callers guarantee at least one such element.
        return ElementTree.tostring(tree, encoding="UTF-8").decode('utf-8')
    def get_xml_string_with_self_contained_assertion_within_encrypted_assertion(
            self, assertion_tag):
        """ Makes a encrypted assertion only containing self contained
        namespaces.

        :param assertion_tag: Tag for the assertion to be transformed.
        :return: A new samlp.Resonse in string representation.
        """
        # Compute private prefixes for every namespace used by the assertion
        # inside the EncryptedAssertion, then rewrite that subtree with them
        # so it remains valid when extracted and encrypted separately.
        prefix_map = self.get_prefix_map(
            [self.encrypted_assertion._to_element_tree().find(assertion_tag)])
        tree = self._to_element_tree()
        self.set_prefixes(
            tree.find(
                self.encrypted_assertion._to_element_tree().tag).find(
                assertion_tag), prefix_map)
        return ElementTree.tostring(tree, encoding="UTF-8").decode('utf-8')
def set_prefixes(self, elem, prefix_map):
# check if this is a tree wrapper
if not ElementTree.iselement(elem):
elem = elem.getroot()
# build uri map and add to root element
uri_map = {}
for prefix, uri in prefix_map.items():
uri_map[uri] = prefix
elem.set("xmlns:" + prefix, uri)
# fixup all elements in the tree
memo = {}
for elem in elem.getiterator():
self.fixup_element_prefixes(elem, uri_map, memo)
def fixup_element_prefixes(self, elem, uri_map, memo):
def fixup(name):
try:
return memo[name]
except KeyError:
if name[0] != "{":
return
uri, tag = name[1:].split("}")
if uri in uri_map:
new_name = uri_map[uri] + ":" + tag
memo[name] = new_name
return new_name
# fix element name
name = fixup(elem.tag)
if name:
elem.tag = name
# fix attribute names
for key, value in elem.items():
name = fixup(key)
if name:
elem.set(name, value)
del elem.attrib[key]
def to_string_force_namespace(self, nspair):
elem = self._to_element_tree()
self.set_prefixes(elem, nspair)
return ElementTree.tostring(elem, encoding="UTF-8")
def to_string(self, nspair=None):
"""Converts the Saml object to a string containing XML.
:param nspair: A dictionary of prefixes and uris to use when
constructing the text representation.
:return: String representation of the object
"""
if not nspair and self.c_ns_prefix:
nspair = self.c_ns_prefix
if nspair:
self.register_prefix(nspair)
return ElementTree.tostring(self._to_element_tree(), encoding="UTF-8")
def __str__(self):
# Yes this is confusing. http://bugs.python.org/issue10942
x = self.to_string()
if not isinstance(x, six.string_types):
x = x.decode('utf-8')
return x
def keyswv(self):
""" Return the keys of attributes or children that has values
:return: list of keys
"""
return [key for key, val in self.__dict__.items() if val]
def keys(self):
""" Return all the keys that represent possible attributes and
children.
:return: list of keys
"""
keys = ['text']
keys.extend([n for (n, t, r) in self.c_attributes.values()])
keys.extend([v[0] for v in self.c_children.values()])
return keys
def children_with_values(self):
    """Return every child instance that actually carries a value.

    :return: possibly empty list of children
    """
    found = []
    for name in self._get_all_c_children_with_order():
        value = getattr(self, name)
        if value is None or value == []:
            continue
        if isinstance(value, list):
            found.extend(value)
        else:
            found.append(value)
    return found
# noinspection PyUnusedLocal
def set_text(self, val, base64encode=False):
""" Sets the text property of this instance.
:param val: The value of the text property
:param base64encode: Whether the value should be base64encoded
:return: The instance
"""
# print("set_text: %s" % (val,))
if isinstance(val, bool):
if val:
setattr(self, "text", "true")
else:
setattr(self, "text", "false")
elif isinstance(val, int):
setattr(self, "text", "%d" % val)
elif isinstance(val, six.string_types):
setattr(self, "text", val)
elif val is None:
pass
else:
raise ValueError("Type shouldn't be '%s'" % (val,))
return self
def loadd(self, ava, base64encode=False):
    """
    Sets attributes, children, extension elements and extension
    attributes of this element instance depending on what is in
    the given dictionary. If there are already values on properties
    those will be overwritten. If the keys in the dictionary does
    not correspond to known attributes/children/.. they are ignored.

    :param ava: The dictionary
    :param base64encode: Whether the values on attributes or texts on
        children should be base64encoded.
    :return: The instance
    """
    # XML attributes: bools and ints are stored as their string form
    for prop, _typ, _req in self.c_attributes.values():
        if prop in ava:
            if isinstance(ava[prop], bool):
                setattr(self, prop, "%s" % ava[prop])
            elif isinstance(ava[prop], int):
                setattr(self, prop, "%d" % ava[prop])
            else:
                setattr(self, prop, ava[prop])

    if "text" in ava:
        self.set_text(ava["text"], base64encode)

    # Children: a list-valued klassdef means the child may occur repeatedly
    for prop, klassdef in self.c_children.values():
        if prop in ava:
            if isinstance(klassdef, list):
                # make_vals assigns the resulting list onto self itself
                make_vals(ava[prop], klassdef[0], self, prop,
                          base64encode=base64encode)
            else:
                cis = make_vals(ava[prop], klassdef, self, prop, True,
                                base64encode)
                setattr(self, prop, cis)

    if "extension_elements" in ava:
        for item in ava["extension_elements"]:
            self.extension_elements.append(ExtensionElement(
                item["tag"]).loadd(item))

    if "extension_attributes" in ava:
        for key, val in ava["extension_attributes"].items():
            self.extension_attributes[key] = val

    return self
def clear_text(self):
    """Drop ``text`` when it consists solely of whitespace."""
    if self.text:
        if not self.text.strip():
            self.text = None
def __eq__(self, other):
    """Structural equality: same populated keys, equal values; list members
    compare as unordered containment of each of self's items in other's.

    :param other: the object to compare against
    :return: True when equal, False otherwise (including non-SamlBase)
    """
    # ``assert`` statements disappear under ``python -O``; test the type
    # explicitly so comparing against a foreign type stays False instead of
    # blowing up with an AttributeError further down.
    if not isinstance(other, SamlBase):
        return False

    # normalize whitespace-only text before comparing
    self.clear_text()
    other.clear_text()

    if len(self.keyswv()) != len(other.keyswv()):
        return False

    for key in self.keyswv():
        if key in ["_extatt"]:
            continue

        svals = self.__dict__[key]
        ovals = other.__dict__[key]
        if isinstance(svals, six.string_types):
            if svals != ovals:
                return False
        elif isinstance(svals, list):
            # every one of self's items must match some item of other's
            for sval in svals:
                try:
                    for oval in ovals:
                        if sval == oval:
                            break
                    else:
                        return False
                except TypeError:
                    # ovals isn't iterable
                    return False
        else:
            if svals == ovals:  # Since I only support '=='
                pass
            else:
                return False
    return True
def child_class(self, child):
    """Return the class a child element should be an instance of.

    :param child: the name of the child element
    :return: the class, or None for an unknown child name
    """
    for name, klassdef in self.c_children.values():
        if child != name:
            continue
        return klassdef[0] if isinstance(klassdef, list) else klassdef
    return None
def child_cardinality(self, child):
    """Return the cardinality of a child element.

    :param child: the name of the child element
    :return: a (min, max) tuple -- max is a number or "unbounded", min is
        always a number; None for an unknown child name
    """
    for name, klassdef in self.c_children.values():
        if child != name:
            continue
        if not isinstance(klassdef, list):
            # a scalar class definition means exactly one occurrence
            return 1, 1
        _min = self.c_cardinality.get("min", 1)
        _max = self.c_cardinality.get("max", "unbounded")
        return _min, _max
    return None
def verify(self):
    """Validate this instance; delegates to saml2.validate.valid_instance."""
    return valid_instance(self)
def empty(self):
    """True when no attribute, child, text or extension carries a value."""
    for name, _typ, _req in self.c_attributes.values():
        if getattr(self, name, None):
            return False
    for name, _klassdef in self.c_children.values():
        if getattr(self, name):
            return False
    return not any(
        getattr(self, field)
        for field in ("text", "extension_elements", "extension_attributes"))
# ----------------------------------------------------------------------------
def element_to_extension_element(element):
    """
    Convert an element into a extension element

    :param element: The element instance
    :return: An extension element instance
    """
    exel = ExtensionElement(element.c_tag, element.c_namespace,
                            text=element.text)
    exel.attributes.update(element.extension_attributes)
    exel.children.extend(element.extension_elements)

    # carry over every XML attribute that has a value set on the element
    for xml_attribute, (member_name, typ, req) in \
            iter(element.c_attributes.items()):
        member_value = getattr(element, member_name)
        if member_value is not None:
            exel.attributes[xml_attribute] = member_value

    # recurse into the children that actually carry values
    exel.children.extend([element_to_extension_element(c) for c in
                          element.children_with_values()])

    return exel
def extension_element_to_element(extension_element, translation_functions,
                                 namespace=None):
    """ Convert an extension element to a normal element.
    In order to do this you need to have an idea of what type of
    element it is. Or rather which module it belongs to.

    :param extension_element: The extension element
    :param translation_functions: A dictionary with class identifiers
        as keys and string-to-element translations functions as values
    :param namespace: The namespace of the translation functions.
    :return: An element instance or None when namespace or tag do not match
    """
    # ExtensionElement keeps the namespace on .namespace; fall back to
    # c_namespace for element-like objects.
    try:
        element_namespace = extension_element.namespace
    except AttributeError:
        element_namespace = extension_element.c_namespace
    if element_namespace == namespace:
        try:
            try:
                ets = translation_functions[extension_element.tag]
            except AttributeError:
                ets = translation_functions[extension_element.c_tag]
            return ets(extension_element.to_string())
        except KeyError:
            # no translation function registered for this tag
            pass

    return None
def extension_elements_to_elements(extension_elements, schemas):
    """ Create a list of elements each one matching one of the
    given extension elements. This is of course dependent on the access
    to schemas that describe the extension elements.

    :param extension_elements: The list of extension elements
    :param schemas: Imported Python modules that represent the different
        known schemas used for the extension elements
    :return: A list of elements, representing the set of extension elements
        that was possible to match against a Class in the given schemas.
        The elements returned are the native representation of the elements
        according to the schemas.
    """
    matched = []
    # accept either a list of schema modules or a dict of them
    if isinstance(schemas, dict):
        schema_list = list(schemas.values())
    elif isinstance(schemas, list):
        schema_list = schemas
    else:
        return matched

    for ext_elem in extension_elements:
        for schema in schema_list:
            inst = extension_element_to_element(ext_elem,
                                                schema.ELEMENT_FROM_STRING,
                                                schema.NAMESPACE)
            if inst:
                # first schema that matches wins
                matched.append(inst)
                break
    return matched
def extension_elements_as_dict(extension_elements, onts):
    """Group matched extension elements by their tag.

    :param extension_elements: list of ExtensionElement instances
    :param onts: schema modules (list or dict) used for the matching
    :return: dict mapping c_tag -> list of matched element instances
    """
    res = {}
    for elem in extension_elements_to_elements(extension_elements, onts):
        # setdefault replaces the try/except KeyError grouping idiom
        res.setdefault(elem.c_tag, []).append(elem)
    return res
| ./CrossVul/dataset_final_sorted/CWE-611/py/good_4805_1 |
crossvul-python_data_bad_4805_1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains base classes representing SAML elements.
These codes were originally written by Jeffrey Scudder for
representing Saml elements. Takashi Matsuo had added some codes, and
changed some. Roland Hedberg rewrote the whole thing from bottom up so
barely anything but the original structures remained.
Module objective: provide data classes for SAML constructs. These
classes hide the XML-ness of SAML and provide a set of native Python
classes to interact with.
Conversions to and from XML should only be necessary when the SAML classes
"touch the wire" and are sent over HTTP. For this reason this module
provides methods and functions to convert SAML classes to and from strings.
"""
__version__ = "4.4.0"
import logging
import six
from saml2.validate import valid_instance
try:
from xml.etree import cElementTree as ElementTree
if ElementTree.VERSION < '1.3.0':
# cElementTree has no support for register_namespace
# neither _namespace_map, thus we sacrify performance
# for correctness
from xml.etree import ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
from elementtree import ElementTree
root_logger = logging.getLogger(__name__)
root_logger.level = logging.NOTSET
NAMESPACE = 'urn:oasis:names:tc:SAML:2.0:assertion'
# TEMPLATE = '{urn:oasis:names:tc:SAML:2.0:assertion}%s'
# XSI_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
NAMEID_FORMAT_EMAILADDRESS = (
"urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress")
# These are defined in saml2.saml
# NAME_FORMAT_UNSPECIFIED = (
# "urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified")
# NAME_FORMAT_URI = "urn:oasis:names:tc:SAML:2.0:attrname-format:uri"
# NAME_FORMAT_BASIC = "urn:oasis:names:tc:SAML:2.0:attrname-format:basic"
DECISION_TYPE_PERMIT = "Permit"
DECISION_TYPE_DENY = "Deny"
DECISION_TYPE_INDETERMINATE = "Indeterminate"
VERSION = "2.0"
BINDING_SOAP = 'urn:oasis:names:tc:SAML:2.0:bindings:SOAP'
BINDING_PAOS = 'urn:oasis:names:tc:SAML:2.0:bindings:PAOS'
BINDING_HTTP_REDIRECT = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect'
BINDING_HTTP_POST = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
BINDING_HTTP_ARTIFACT = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Artifact'
BINDING_URI = 'urn:oasis:names:tc:SAML:2.0:bindings:URI'
def class_name(instance):
    """Return the ``namespace:tag`` identifier of a SAML element instance."""
    return "{ns}:{tag}".format(ns=instance.c_namespace, tag=instance.c_tag)
def create_class_from_xml_string(target_class, xml_string):
    """Creates an instance of the target class from a string.

    :param target_class: The class which will be instantiated and populated
        with the contents of the XML. This class must have a c_tag and a
        c_namespace class variable.
    :param xml_string: A string which contains valid XML. The root element
        of the XML string should match the tag and namespace of the desired
        class.
    :return: An instance of the target class with members assigned according to
        the contents of the XML - or None if the root XML tag and namespace did
        not match those of the target class.
    """
    if not isinstance(xml_string, six.binary_type):
        xml_string = xml_string.encode('utf-8')
    # SECURITY NOTE(review): xml.etree parses without hardening against
    # maliciously crafted XML (e.g. entity-expansion "billion laughs",
    # CWE-611/CWE-776). SAML payloads come from untrusted peers, so this
    # should use defusedxml.ElementTree.fromstring instead -- TODO switch
    # once that dependency is available.
    tree = ElementTree.fromstring(xml_string)
    return create_class_from_element_tree(target_class, tree)
def create_class_from_element_tree(target_class, tree, namespace=None,
                                   tag=None):
    """Instantiates the class and populates members according to the tree.

    Note: Only use this function with classes that have c_namespace and c_tag
    class members.

    :param target_class: The class which will be instantiated and populated
        with the contents of the XML.
    :param tree: An element tree whose contents will be converted into
        members of the new target_class instance.
    :param namespace: The namespace which the XML tree's root node must
        match. If omitted, the namespace defaults to the c_namespace of the
        target class.
    :param tag: The tag which the XML tree's root node must match. If
        omitted, the tag defaults to the c_tag class member of the target
        class.
    :return: An instance of the target class - or None if the tag and
        namespace of the XML tree's root node did not match.
    """
    expected_ns = target_class.c_namespace if namespace is None else namespace
    expected_tag = target_class.c_tag if tag is None else tag

    if tree.tag != '{%s}%s' % (expected_ns, expected_tag):
        return None

    instance = target_class()
    instance.harvest_element_tree(tree)
    return instance
class Error(Exception):
    """Exception class thrown by this module.

    Kept alongside SAMLError for backwards compatibility.
    """
    pass
class SAMLError(Exception):
    """Base class for SAML related errors."""
    pass
class ExtensionElement(object):
    """XML which is not part of the SAML specification,
    these are called extension elements. If a classes parser
    encounters an unexpected XML construct, it is translated into an
    ExtensionElement instance. ExtensionElement is designed to fully
    capture the information in the XML. Child nodes in an XML
    extension are turned into ExtensionElements as well.
    """

    def __init__(self, tag, namespace=None, attributes=None,
                 children=None, text=None):
        """Constructor for ExtensionElement

        :param namespace: The XML namespace for this element.
        :param tag: The tag (without the namespace qualifier) for
            this element. To reconstruct the full qualified name of the
            element, combine this tag with the namespace.
        :param attributes: The attribute value string pairs for the XML
            attributes of this element.
        :param children: list (optional) A list of ExtensionElements which
            represent the XML child nodes of this element.
        :param text: text content of the element, if any
        """
        self.namespace = namespace
        self.tag = tag
        self.attributes = attributes or {}
        self.children = children or []
        self.text = text

    def to_string(self):
        """ Serialize the object into a XML string """
        element_tree = self.transfer_to_element_tree()
        return ElementTree.tostring(element_tree, encoding="UTF-8")

    def transfer_to_element_tree(self):
        """Build an ElementTree element mirroring this extension element.

        :return: the new element, or None when no tag is set
        """
        if self.tag is None:
            return None

        element_tree = ElementTree.Element('')

        # qualify the tag with the namespace if one is known
        if self.namespace is not None:
            element_tree.tag = '{%s}%s' % (self.namespace, self.tag)
        else:
            element_tree.tag = self.tag

        for key, value in iter(self.attributes.items()):
            element_tree.attrib[key] = value

        for child in self.children:
            child.become_child_element_of(element_tree)

        element_tree.text = self.text

        return element_tree

    def become_child_element_of(self, element_tree):
        """Converts this object into an etree element and adds it as a child
        node in an etree element.

        Adds self to the ElementTree. This method is required to avoid verbose
        XML which constantly redefines the namespace.

        :param element_tree: ElementTree._Element The element to which this
            object's XML will be added.
        """
        new_element = self.transfer_to_element_tree()
        element_tree.append(new_element)

    def find_children(self, tag=None, namespace=None):
        """Searches child nodes for objects with the desired tag/namespace.

        Returns a list of extension elements within this object whose tag
        and/or namespace match those passed in. To find all children in
        a particular namespace, specify the namespace but not the tag name.
        If you specify only the tag, the result list may contain extension
        elements in multiple namespaces.

        :param tag: str (optional) The desired tag
        :param namespace: str (optional) The desired namespace

        :return: A list of elements whose tag and/or namespace match the
            parameters values
        """
        results = []

        if tag and namespace:
            for element in self.children:
                if element.tag == tag and element.namespace == namespace:
                    results.append(element)
        elif tag and not namespace:
            for element in self.children:
                if element.tag == tag:
                    results.append(element)
        elif namespace and not tag:
            for element in self.children:
                if element.namespace == namespace:
                    results.append(element)
        else:
            # neither tag nor namespace given: return all children
            for element in self.children:
                results.append(element)

        return results

    def loadd(self, ava):
        """Populate this instance from a dict with the keys 'attributes',
        'tag', 'namespace', 'text' and 'children'.

        :param ava: the dictionary; 'tag' and 'namespace' are mandatory
            unless already set on the instance
        :return: the instance
        """
        if "attributes" in ava:
            for key, val in ava["attributes"].items():
                self.attributes[key] = val

        try:
            self.tag = ava["tag"]
        except KeyError:
            if not self.tag:
                raise KeyError("ExtensionElement must have a tag")

        try:
            self.namespace = ava["namespace"]
        except KeyError:
            if not self.namespace:
                raise KeyError("ExtensionElement must belong to a namespace")

        try:
            self.text = ava["text"]
        except KeyError:
            pass

        if "children" in ava:
            for item in ava["children"]:
                self.children.append(ExtensionElement(item["tag"]).loadd(item))

        return self
def extension_element_from_string(xml_string):
    """Parse *xml_string* and wrap its root element as an ExtensionElement."""
    # SECURITY NOTE(review): plain xml.etree parsing of potentially
    # untrusted input (CWE-611); prefer defusedxml.ElementTree.fromstring,
    # same as create_class_from_xml_string.
    element_tree = ElementTree.fromstring(xml_string)
    return _extension_element_from_element_tree(element_tree)
def _extension_element_from_element_tree(element_tree):
    """Recursively convert an ElementTree element into an ExtensionElement.

    :param element_tree: the element to convert
    :return: an equivalent ExtensionElement tree
    """
    qname = element_tree.tag
    # split a '{uri}local' qualified name into its two parts
    if '}' in qname:
        cut = qname.index('}')
        namespace, tag = qname[1:cut], qname[cut + 1:]
    else:
        namespace, tag = None, qname

    extension = ExtensionElement(namespace=namespace, tag=tag)
    extension.attributes.update(element_tree.attrib)
    for child in element_tree:
        extension.children.append(_extension_element_from_element_tree(child))
    extension.text = element_tree.text
    return extension
class ExtensionContainer(object):
    """Base class that captures XML constructs the library has no schema
    for: unknown child elements are stored in ``extension_elements`` and
    unknown attributes in ``extension_attributes``.
    """
    c_tag = ""
    c_namespace = ""

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
        # used by the SamlBase serialization helpers that deal with
        # encrypted assertions
        self.encrypted_assertion = None

    # Three methods to create an object from an ElementTree
    def harvest_element_tree(self, tree):
        """Fill in the instance members from the contents of the XML tree."""
        for child in tree:
            self._convert_element_tree_to_member(child)
        for attribute, value in iter(tree.attrib.items()):
            self._convert_element_attribute_to_member(attribute, value)
        self.text = tree.text

    def _convert_element_tree_to_member(self, child_tree):
        # unknown children are kept verbatim as extension elements
        self.extension_elements.append(_extension_element_from_element_tree(
            child_tree))

    def _convert_element_attribute_to_member(self, attribute, value):
        # unknown attributes are kept verbatim as extension attributes
        self.extension_attributes[attribute] = value

    # One method to create an ElementTree from an object
    def _add_members_to_element_tree(self, tree):
        for child in self.extension_elements:
            child.become_child_element_of(tree)
        for attribute, value in iter(self.extension_attributes.items()):
            tree.attrib[attribute] = value
        tree.text = self.text

    def find_extensions(self, tag=None, namespace=None):
        """Searches extension elements for child nodes with the desired name.

        Returns a list of extension elements within this object whose tag
        and/or namespace match those passed in. To find all extensions in
        a particular namespace, specify the namespace but not the tag name.
        If you specify only the tag, the result list may contain extension
        elements in multiple namespaces.

        :param tag: str (optional) The desired tag
        :param namespace: str (optional) The desired namespace

        :Return: A list of elements whose tag and/or namespace match the
            parameters values
        """
        results = []

        if tag and namespace:
            for element in self.extension_elements:
                if element.tag == tag and element.namespace == namespace:
                    results.append(element)
        elif tag and not namespace:
            for element in self.extension_elements:
                if element.tag == tag:
                    results.append(element)
        elif namespace and not tag:
            for element in self.extension_elements:
                if element.namespace == namespace:
                    results.append(element)
        else:
            for element in self.extension_elements:
                results.append(element)

        return results

    def extensions_as_elements(self, tag, schema):
        """ Return extensions that has the given tag and belongs to the
        given schema as native elements of that schema.

        :param tag: The tag of the element
        :param schema: Which schema the element should originate from
        :return: a list of native elements
        """
        result = []
        for ext in self.find_extensions(tag, schema.NAMESPACE):
            # a KeyError here means the schema does not know this tag
            ets = schema.ELEMENT_FROM_STRING[tag]
            result.append(ets(ext.to_string()))

        return result

    def add_extension_elements(self, items):
        # wrap each native element and append it as an extension element
        for item in items:
            self.extension_elements.append(element_to_extension_element(item))

    def add_extension_element(self, item):
        self.extension_elements.append(element_to_extension_element(item))

    def add_extension_attribute(self, name, value):
        self.extension_attributes[name] = value
def make_vals(val, klass, klass_inst=None, prop=None, part=False,
              base64encode=False):
    """
    Creates a class instance with a specified value, the specified
    class instance may be a value on a property in a defined class instance.

    :param val: The value
    :param klass: The value class
    :param klass_inst: The class instance which has a property on which
        what this function returns is a value.
    :param prop: The property which the value should be assigned to.
    :param part: If the value is one of a possible list of values it should be
        handled slightly different compared to if it isn't.
    :param base64encode: whether dict-loaded values should be base64 encoded
    :return: Value class instance when part is true; otherwise the created
        instance(s) are assigned to klass_inst.prop and None is returned
        implicitly.
    """
    cinst = None

    if isinstance(val, dict):
        cinst = klass().loadd(val, base64encode=base64encode)
    else:
        try:
            cinst = klass().set_text(val)
        except ValueError:
            # val was not a simple text value; assume it is a sequence and
            # recurse over its items (only allowed at the top level)
            if not part:
                cis = [make_vals(sval, klass, klass_inst, prop, True,
                                 base64encode) for sval in val]
                setattr(klass_inst, prop, cis)
            else:
                raise

    if part:
        return cinst
    else:
        if cinst:
            cis = [cinst]
            setattr(klass_inst, prop, cis)
def make_instance(klass, spec, base64encode=False):
    """Construct a *klass* instance populated from *spec*.

    :param klass: the class to instantiate
    :param spec: dictionary with the information to be placed in the instance
    :param base64encode: whether text values should be base64 encoded
    :return: the populated instance
    """
    instance = klass()
    return instance.loadd(spec, base64encode)
class SamlBase(ExtensionContainer):
    """A foundation class on which SAML classes are built. It
    handles the parsing of attributes and children which are common to all
    SAML classes. By default, the SamlBase class translates all XML child
    nodes into ExtensionElements.
    """
    # Subclasses override these class variables to describe their schema:
    c_children = {}        # qualified child tag -> (member name, class)
    c_attributes = {}      # XML attribute -> (member name, type, required)
    c_attribute_type = {}
    c_child_order = []     # serialization order of child members
    c_cardinality = {}     # "min"/"max" occurrence constraints
    c_any = None
    c_any_attribute = None
    c_value_type = None
    c_ns_prefix = None     # default prefix -> URI map used by to_string()
def _get_all_c_children_with_order(self):
if len(self.c_child_order) > 0:
for child in self.c_child_order:
yield child
else:
for _, values in iter(self.__class__.c_children.items()):
yield values[0]
def _convert_element_tree_to_member(self, child_tree):
    """Map an XML child onto its member, or keep it as an extension element.

    :param child_tree: ElementTree element for one child node
    """
    # Find the element's tag in this class's list of child members
    if child_tree.tag in self.__class__.c_children:
        member_name = self.__class__.c_children[child_tree.tag][0]
        member_class = self.__class__.c_children[child_tree.tag][1]

        # If the class member is supposed to contain a list, make sure the
        # matching member is set to a list, then append the new member
        # instance to the list.
        if isinstance(member_class, list):
            if getattr(self, member_name) is None:
                setattr(self, member_name, [])
            getattr(self, member_name).append(
                create_class_from_element_tree(member_class[0], child_tree))
        else:
            setattr(self, member_name,
                    create_class_from_element_tree(member_class,
                                                   child_tree))
    else:
        # unknown child: fall back to generic extension-element handling
        ExtensionContainer._convert_element_tree_to_member(self, child_tree)
def _convert_element_attribute_to_member(self, attribute, value):
    """Store an XML attribute on the matching member, or as an extension.

    :param attribute: the XML attribute name
    :param value: the attribute value
    """
    # Find the attribute in this class's list of attributes.
    if attribute in self.__class__.c_attributes:
        # Find the member of this class which corresponds to the XML
        # attribute (lookup in current_class.c_attributes) and set this
        # member to the desired value (using self.__dict__).
        setattr(self, self.__class__.c_attributes[attribute][0], value)
    else:
        # If it doesn't appear in the attribute list it's an extension
        ExtensionContainer._convert_element_attribute_to_member(
            self, attribute, value)
# Three methods to create an ElementTree from an object
def _add_members_to_element_tree(self, tree):
    """Write this instance's children and attributes into *tree*."""
    # Convert the members of this class which are XML child nodes.
    # This uses the class's c_children dictionary to find the members which
    # should become XML child nodes.
    for member_name in self._get_all_c_children_with_order():
        member = getattr(self, member_name)
        if member is None:
            pass
        elif isinstance(member, list):
            for instance in member:
                instance.become_child_element_of(tree)
        else:
            member.become_child_element_of(tree)
    # Convert the members of this class which are XML attributes.
    for xml_attribute, attribute_info in \
            iter(self.__class__.c_attributes.items()):
        (member_name, member_type, required) = attribute_info
        member = getattr(self, member_name)
        if member is not None:
            tree.attrib[xml_attribute] = member
    # Lastly, call the ExtensionContainers's _add_members_to_element_tree
    # to convert any extension attributes.
    ExtensionContainer._add_members_to_element_tree(self, tree)
def become_child_element_of(self, node):
    """Serialize this instance to an element and append it to *node*.

    Note: Only for use with classes that have a c_tag and c_namespace class
    member. It is in SamlBase so that it can be inherited but it should
    not be called on instances of SamlBase.

    :param node: The node to which this instance should be a child
    """
    new_child = self._to_element_tree()
    node.append(new_child)
def _to_element_tree(self):
    """Build the ElementTree element representing this instance.

    Note, this method is designed to be used only with classes that have a
    c_tag and c_namespace. It is placed in SamlBase for inheritance but
    should not be called on in this class.
    """
    new_tree = ElementTree.Element('{%s}%s' % (self.__class__.c_namespace,
                                               self.__class__.c_tag))
    self._add_members_to_element_tree(new_tree)
    return new_tree
def register_prefix(self, nspair):
    """
    Register with ElementTree a set of namespaces

    :param nspair: A dictionary of prefixes and uris to use when
        constructing the text representation.
    :return:
    """
    for prefix, uri in nspair.items():
        try:
            ElementTree.register_namespace(prefix, uri)
        except AttributeError:
            # Backwards compatibility with ET < 1.3
            ElementTree._namespace_map[uri] = prefix
        except ValueError:
            # register_namespace rejects some reserved prefixes; keep
            # whatever mapping is already registered and carry on
            pass
def get_ns_map_attribute(self, attributes, uri_set):
    """Add the namespace URI of every qualified attribute name to *uri_set*.

    :param attributes: iterable of attribute names (possibly '{uri}local')
    :param uri_set: set collecting the namespace URIs
    :return: the same set, updated
    """
    for name in attributes:
        if name[0] == "{":
            uri, _local = name[1:].split("}")
            uri_set.add(uri)
    return uri_set
def tag_get_uri(self, elem):
    """Return the namespace URI of *elem*'s tag, or None if unqualified."""
    qname = elem.tag
    if qname[0] != "{":
        return None
    uri, _local = qname[1:].split("}")
    return uri
def get_ns_map(self, elements, uri_set):
    """Recursively collect all namespace URIs used below *elements*.

    :param elements: iterable of ElementTree elements
    :param uri_set: set the URIs are accumulated into
    :return: the updated set
    """
    for elem in elements:
        uri_set = self.get_ns_map_attribute(elem.attrib, uri_set)
        # Element.getchildren() was removed in Python 3.9; iterating the
        # element directly yields the same direct children.
        uri_set = self.get_ns_map(list(elem), uri_set)
        uri = self.tag_get_uri(elem)
        if uri is not None:
            uri_set.add(uri)
    return uri_set
def get_prefix_map(self, elements):
    """Invent prefixes (encas0, encas1, ...) for every namespace in use.

    :param elements: elements whose namespace URIs should be mapped
    :return: dict of generated prefix -> namespace URI
    """
    prefix_map = {}
    # sort for a deterministic prefix assignment
    for index, uri in enumerate(sorted(self.get_ns_map(elements, set()))):
        prefix_map["encas%d" % index] = uri
    return prefix_map
def get_xml_string_with_self_contained_assertion_within_advice_encrypted_assertion(
        self, assertion_tag, advice_tag):
    """Serialize, making advice EncryptedAssertion blocks self-contained.

    For every advice EncryptedAssertion that has not yet been encrypted
    (encrypted_data is None), rewrite the embedded assertion so all
    namespace prefixes it needs are declared locally.

    :param assertion_tag: qualified tag of the assertion elements
    :param advice_tag: qualified tag of the advice element
    :return: the serialized document as a text string

    NOTE(review): ``tree`` is only bound inside the ``if`` branch; if no
    advice EncryptedAssertion lacks encrypted_data the final ``return``
    raises NameError -- presumably callers guarantee at least one such
    entry. Verify before relying on this in other contexts.
    """
    for tmp_encrypted_assertion in \
            self.assertion.advice.encrypted_assertion:
        if tmp_encrypted_assertion.encrypted_data is None:
            prefix_map = self.get_prefix_map([
                tmp_encrypted_assertion._to_element_tree().find(
                    assertion_tag)])

            tree = self._to_element_tree()

            encs = tree.find(assertion_tag).find(advice_tag).findall(
                tmp_encrypted_assertion._to_element_tree().tag)
            for enc in encs:
                assertion = enc.find(assertion_tag)
                if assertion is not None:
                    self.set_prefixes(assertion, prefix_map)

    return ElementTree.tostring(tree, encoding="UTF-8").decode('utf-8')
def get_xml_string_with_self_contained_assertion_within_encrypted_assertion(
        self, assertion_tag):
    """ Makes a encrypted assertion only containing self contained
    namespaces.

    :param assertion_tag: Tag for the assertion to be transformed.
    :return: A new samlp.Response in string representation.
    """
    # collect the namespaces the embedded assertion needs ...
    prefix_map = self.get_prefix_map(
        [self.encrypted_assertion._to_element_tree().find(assertion_tag)])

    tree = self._to_element_tree()

    # ... and declare them locally on that assertion inside the full tree
    self.set_prefixes(
        tree.find(
            self.encrypted_assertion._to_element_tree().tag).find(
            assertion_tag), prefix_map)

    return ElementTree.tostring(tree, encoding="UTF-8").decode('utf-8')
def set_prefixes(self, elem, prefix_map):
    """Declare *prefix_map* on the tree's root and rewrite all tags to use it.

    :param elem: an Element, or an ElementTree wrapper (unwrapped to its root)
    :param prefix_map: mapping of prefix -> namespace URI
    """
    # check if this is a tree wrapper
    if not ElementTree.iselement(elem):
        elem = elem.getroot()
    # build the reverse uri -> prefix map and declare each namespace on
    # the root element
    uri_map = {}
    for prefix, uri in prefix_map.items():
        uri_map[uri] = prefix
        elem.set("xmlns:" + prefix, uri)
    # fixup all elements in the tree; Element.getiterator() was removed in
    # Python 3.9 -- Element.iter() is the long-standing equivalent.
    memo = {}
    for elem in elem.iter():
        self.fixup_element_prefixes(elem, uri_map, memo)
def fixup_element_prefixes(self, elem, uri_map, memo):
    """Rewrite '{uri}tag' qualified names on *elem* to 'prefix:tag' form.

    :param elem: the element to rewrite in place
    :param uri_map: mapping of namespace URI -> prefix
    :param memo: cache of already translated qualified names
    """
    def fixup(name):
        # Translate '{uri}tag' -> 'prefix:tag', caching the result.
        # Unqualified names and unknown URIs map to None.
        try:
            return memo[name]
        except KeyError:
            if name[0] != "{":
                return
            uri, tag = name[1:].split("}")
            if uri in uri_map:
                new_name = uri_map[uri] + ":" + tag
                memo[name] = new_name
                return new_name

    # fix element name
    name = fixup(elem.tag)
    if name:
        elem.tag = name
    # fix attribute names; iterate a snapshot because the loop deletes keys
    # from elem.attrib -- deleting while iterating the live items() view
    # raises RuntimeError on Python 3.
    for key, value in list(elem.items()):
        name = fixup(key)
        if name:
            elem.set(name, value)
            del elem.attrib[key]
def to_string_force_namespace(self, nspair):
    """Serialize to XML, forcing the given prefix -> URI declarations.

    :param nspair: dictionary of prefixes and uris to declare on the root
    :return: the serialized XML as produced by ElementTree.tostring
    """
    elem = self._to_element_tree()
    self.set_prefixes(elem, nspair)
    return ElementTree.tostring(elem, encoding="UTF-8")
def to_string(self, nspair=None):
    """Converts the Saml object to a string containing XML.

    :param nspair: A dictionary of prefixes and uris to use when
        constructing the text representation.
    :return: String representation of the object (bytes on Python 3,
        since tostring() with a non-text encoding returns bytes --
        __str__ below decodes it)
    """
    if not nspair and self.c_ns_prefix:
        # fall back to the class-level default prefix map
        nspair = self.c_ns_prefix

    if nspair:
        self.register_prefix(nspair)

    return ElementTree.tostring(self._to_element_tree(), encoding="UTF-8")
def __str__(self):
    # ElementTree.tostring returns bytes under Python 3; decode so that
    # str() always yields text (see http://bugs.python.org/issue10942).
    serialized = self.to_string()
    if isinstance(serialized, six.string_types):
        return serialized
    return serialized.decode('utf-8')
def keyswv(self):
    """Return the names of instance attributes that hold a truthy value.

    :return: list of keys
    """
    populated = []
    for name, value in self.__dict__.items():
        if value:
            populated.append(name)
    return populated
def keys(self):
    """Return every key that may appear as an attribute or child.

    :return: list of keys, always starting with 'text'
    """
    result = ["text"]
    for name, _typ, _req in self.c_attributes.values():
        result.append(name)
    for child_def in self.c_children.values():
        result.append(child_def[0])
    return result
def children_with_values(self):
    """Return every child instance that actually carries a value.

    :return: possibly empty list of children
    """
    found = []
    for name in self._get_all_c_children_with_order():
        value = getattr(self, name)
        if value is None or value == []:
            continue
        if isinstance(value, list):
            found.extend(value)
        else:
            found.append(value)
    return found
# noinspection PyUnusedLocal
def set_text(self, val, base64encode=False):
""" Sets the text property of this instance.
:param val: The value of the text property
:param base64encode: Whether the value should be base64encoded
:return: The instance
"""
# print("set_text: %s" % (val,))
if isinstance(val, bool):
if val:
setattr(self, "text", "true")
else:
setattr(self, "text", "false")
elif isinstance(val, int):
setattr(self, "text", "%d" % val)
elif isinstance(val, six.string_types):
setattr(self, "text", val)
elif val is None:
pass
else:
raise ValueError("Type shouldn't be '%s'" % (val,))
return self
def loadd(self, ava, base64encode=False):
    """
    Sets attributes, children, extension elements and extension
    attributes of this element instance depending on what is in
    the given dictionary. If there are already values on properties
    those will be overwritten. If the keys in the dictionary does
    not correspond to known attributes/children/.. they are ignored.

    :param ava: The dictionary
    :param base64encode: Whether the values on attributes or texts on
        children should be base64encoded.
    :return: The instance
    """
    # XML attributes: bools and ints are stored as their string form
    for prop, _typ, _req in self.c_attributes.values():
        if prop in ava:
            if isinstance(ava[prop], bool):
                setattr(self, prop, "%s" % ava[prop])
            elif isinstance(ava[prop], int):
                setattr(self, prop, "%d" % ava[prop])
            else:
                setattr(self, prop, ava[prop])

    if "text" in ava:
        self.set_text(ava["text"], base64encode)

    # Children: a list-valued klassdef means the child may occur repeatedly
    for prop, klassdef in self.c_children.values():
        if prop in ava:
            if isinstance(klassdef, list):
                # make_vals assigns the resulting list onto self itself
                make_vals(ava[prop], klassdef[0], self, prop,
                          base64encode=base64encode)
            else:
                cis = make_vals(ava[prop], klassdef, self, prop, True,
                                base64encode)
                setattr(self, prop, cis)

    if "extension_elements" in ava:
        for item in ava["extension_elements"]:
            self.extension_elements.append(ExtensionElement(
                item["tag"]).loadd(item))

    if "extension_attributes" in ava:
        for key, val in ava["extension_attributes"].items():
            self.extension_attributes[key] = val

    return self
def clear_text(self):
    """Drop ``text`` when it consists solely of whitespace."""
    if self.text:
        if not self.text.strip():
            self.text = None
def __eq__(self, other):
    """Structural equality: same populated keys, equal values; list members
    compare as unordered containment of each of self's items in other's.

    :param other: the object to compare against
    :return: True when equal, False otherwise (including non-SamlBase)
    """
    # ``assert`` statements disappear under ``python -O``; test the type
    # explicitly so comparing against a foreign type stays False instead of
    # blowing up with an AttributeError further down.
    if not isinstance(other, SamlBase):
        return False

    # normalize whitespace-only text before comparing
    self.clear_text()
    other.clear_text()

    if len(self.keyswv()) != len(other.keyswv()):
        return False

    for key in self.keyswv():
        if key in ["_extatt"]:
            continue

        svals = self.__dict__[key]
        ovals = other.__dict__[key]
        if isinstance(svals, six.string_types):
            if svals != ovals:
                return False
        elif isinstance(svals, list):
            # every one of self's items must match some item of other's
            for sval in svals:
                try:
                    for oval in ovals:
                        if sval == oval:
                            break
                    else:
                        return False
                except TypeError:
                    # ovals isn't iterable
                    return False
        else:
            if svals == ovals:  # Since I only support '=='
                pass
            else:
                return False
    return True
def child_class(self, child):
    """Return the class a child element should be an instance of.

    :param child: The name of the child element
    :return: The class, or None if the name is unknown
    """
    for name, spec in self.c_children.values():
        if name == child:
            # A list-valued spec means "repeated element"; the element
            # class is the single item inside the list.
            return spec[0] if isinstance(spec, list) else spec
    return None
def child_cardinality(self, child):
    """Return the cardinality of a child element.

    :param child: The name of the child element
    :return: (min, max) where max is a number or the string "unbounded"
        and min is always a number; None if the child is unknown.
    """
    for name, spec in self.c_children.values():
        if name != child:
            continue
        if not isinstance(spec, list):
            # Scalar spec: exactly one occurrence.
            return 1, 1
        # Repeated element: limits come from the class cardinality map,
        # defaulting to "at least one, no upper bound".
        return (self.c_cardinality.get("min", 1),
                self.c_cardinality.get("max", "unbounded"))
    return None
def verify(self):
    """Validate this instance against its schema; delegates to valid_instance()."""
    return valid_instance(self)
def empty(self):
    """Return True if no attribute, child, text or extension is set."""
    if any(getattr(self, prop, None)
           for prop, _typ, _req in self.c_attributes.values()):
        return False
    # Child members are expected to exist on the instance (no default).
    if any(getattr(self, prop) for prop, _spec in self.c_children.values()):
        return False
    return not (self.text or self.extension_elements
                or self.extension_attributes)
# ----------------------------------------------------------------------------
def element_to_extension_element(element):
    """
    Convert an element into an extension element.

    :param element: The element instance
    :return: An extension element instance
    """
    ext = ExtensionElement(element.c_tag, element.c_namespace,
                           text=element.text)

    # Carry over pre-existing extension data.
    ext.attributes.update(element.extension_attributes)
    ext.children.extend(element.extension_elements)

    # Promote every set schema attribute to an extension attribute.
    for xml_attribute, (member, _typ, _req) in iter(
            element.c_attributes.items()):
        value = getattr(element, member)
        if value is not None:
            ext.attributes[xml_attribute] = value

    # Recurse into populated children.
    ext.children.extend(element_to_extension_element(child)
                        for child in element.children_with_values())

    return ext
def extension_element_to_element(extension_element, translation_functions,
                                 namespace=None):
    """Convert an extension element to a normal element.

    In order to do this you need to know which module (schema) the
    element belongs to.

    :param extension_element: The extension element
    :param translation_functions: A dictionary with class identifiers
        as keys and string-to-element translation functions as values
    :param namespace: The namespace of the translation functions.
    :return: An element instance or None
    """
    # Extension elements carry .namespace/.tag, schema classes carry
    # .c_namespace/.c_tag - accept either flavour.
    try:
        ns = extension_element.namespace
    except AttributeError:
        ns = extension_element.c_namespace

    if ns != namespace:
        return None

    try:
        try:
            translate = translation_functions[extension_element.tag]
        except AttributeError:
            translate = translation_functions[extension_element.c_tag]
        return translate(extension_element.to_string())
    except KeyError:
        # No translation function registered for this tag.
        pass
    return None
def extension_elements_to_elements(extension_elements, schemas):
    """Create a list of elements matching the given extension elements.

    :param extension_elements: The list of extension elements
    :param schemas: Imported Python modules (list or dict of modules)
        that represent the known schemas for the extension elements
    :return: The elements that could be matched against a class in the
        given schemas, in their native schema representation.
    """
    matched = []
    # Accept a dict of modules as well as a plain list; anything else
    # yields no matches.
    if isinstance(schemas, dict):
        schemas = list(schemas.values())
    elif not isinstance(schemas, list):
        return matched

    for ext in extension_elements:
        for schema in schemas:
            element = extension_element_to_element(
                ext, schema.ELEMENT_FROM_STRING, schema.NAMESPACE)
            if element:
                matched.append(element)
                break  # first matching schema wins
    return matched
def extension_elements_as_dict(extension_elements, onts):
    """Group matched extension elements by their tag name.

    :param extension_elements: The list of extension elements
    :param onts: Schema modules used to resolve the elements
    :return: dict mapping c_tag -> list of element instances
    """
    grouped = {}
    for element in extension_elements_to_elements(extension_elements, onts):
        grouped.setdefault(element.c_tag, []).append(element)
    return grouped
| ./CrossVul/dataset_final_sorted/CWE-611/py/bad_4805_1 |
crossvul-python_data_good_4805_3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
Suppport for the client part of the SAML2.0 SOAP binding.
"""
import logging
from saml2 import create_class_from_element_tree
from saml2.samlp import NAMESPACE as SAMLP_NAMESPACE
from saml2.schema import soapenv
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
#noinspection PyUnresolvedReferences
from elementtree import ElementTree
import defusedxml.ElementTree
logger = logging.getLogger(__name__)
class XmlParseError(Exception):
    """Raised when a SOAP envelope cannot be parsed as XML."""
    pass
class WrongMessageType(Exception):
    """Raised when the SOAP body holds a different SAML element than expected."""
    pass
# Thin convenience wrappers: each one names the SAML protocol element(s)
# it is willing to extract from a SOAP envelope and delegates the actual
# parsing to parse_soap_enveloped_saml_thingy().

def parse_soap_enveloped_saml_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE,
            '{%s}LogoutResponse' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


def parse_soap_enveloped_saml_logout_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE,
            '{%s}LogoutResponse' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


def parse_soap_enveloped_saml_attribute_query(text):
    expected_tag = '{%s}AttributeQuery' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_attribute_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE,
            '{%s}AttributeResponse' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


def parse_soap_enveloped_saml_logout_request(text):
    expected_tag = '{%s}LogoutRequest' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_authn_request(text):
    expected_tag = '{%s}AuthnRequest' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_artifact_resolve(text):
    expected_tag = '{%s}ArtifactResolve' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_artifact_response(text):
    expected_tag = '{%s}ArtifactResponse' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_name_id_mapping_request(text):
    expected_tag = '{%s}NameIDMappingRequest' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_name_id_mapping_response(text):
    expected_tag = '{%s}NameIDMappingResponse' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_manage_name_id_request(text):
    expected_tag = '{%s}ManageNameIDRequest' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_manage_name_id_response(text):
    expected_tag = '{%s}ManageNameIDResponse' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_assertion_id_request(text):
    expected_tag = '{%s}AssertionIDRequest' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_assertion_id_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE,
            '{%s}AssertionIDResponse' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


def parse_soap_enveloped_saml_authn_query(text):
    expected_tag = '{%s}AuthnQuery' % SAMLP_NAMESPACE
    return parse_soap_enveloped_saml_thingy(text, [expected_tag])


def parse_soap_enveloped_saml_authn_query_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)


def parse_soap_enveloped_saml_authn_response(text):
    tags = ['{%s}Response' % SAMLP_NAMESPACE]
    return parse_soap_enveloped_saml_thingy(text, tags)
#def parse_soap_enveloped_saml_logout_response(text):
# expected_tag = '{%s}LogoutResponse' % SAMLP_NAMESPACE
# return parse_soap_enveloped_saml_thingy(text, [expected_tag])
def parse_soap_enveloped_saml_thingy(text, expected_tags):
    """Parses a SOAP enveloped SAML thing and returns the thing as
    a string.

    :param text: The SOAP object as XML string
    :param expected_tags: What the tag of the SAML thingy is expected to be.
    :return: SAML thingy as a string
    :raises WrongMessageType: if the body holds a different element
    """
    # defusedxml guards against entity-expansion / external-entity (XXE)
    # attacks in attacker-supplied SOAP input.
    envelope = defusedxml.ElementTree.fromstring(text)

    # Make sure it's a SOAP message
    assert envelope.tag == '{%s}Envelope' % soapenv.NAMESPACE

    assert len(envelope) >= 1
    body = None
    for part in envelope:
        if part.tag == '{%s}Body' % soapenv.NAMESPACE:
            # A SOAP body must wrap exactly one SAML element here.
            assert len(part) == 1
            body = part
            break

    if body is None:
        return ""

    saml_part = body[0]
    if saml_part.tag in expected_tags:
        return ElementTree.tostring(saml_part, encoding="UTF-8")
    else:
        raise WrongMessageType("Was '%s' expected one of %s" % (saml_part.tag,
                                                                expected_tags))
import re

# Matches a Clark-notation tag ("{namespace}localname") and captures the
# namespace and local tag name. Raw string avoids the invalid "\{" escape
# warning (a SyntaxWarning/error in newer Python versions).
NS_AND_TAG = re.compile(r"\{([^}]+)\}(.*)")
def instanciate_class(item, modules):
    """Instantiate the schema class matching an ElementTree element.

    :param item: ElementTree element whose tag is in Clark notation
    :param modules: candidate schema modules
    :return: a class instance built from the element
    :raises Exception: if no module matches the namespace/tag pair
    """
    ns, tag = NS_AND_TAG.match(item.tag).groups()
    for module in modules:
        if module.NAMESPACE != ns:
            continue
        try:
            target = module.ELEMENT_BY_TAG[tag]
            return create_class_from_element_tree(target, item)
        except KeyError:
            # This module doesn't know the tag; try the next one.
            continue
    raise Exception("Unknown class: ns='%s', tag='%s'" % (ns, tag))
def class_instances_from_soap_enveloped_saml_thingies(text, modules):
    """Parses a SOAP enveloped header and body SAML thing and returns the
    thing as a dictionary class instance.

    :param text: The SOAP object as XML
    :param modules: modules representing xsd schemas
    :return: The body and headers as class instances
    :raises XmlParseError: if *text* is not well-formed XML
    """
    try:
        # defusedxml protects against XXE/entity-expansion attacks.
        envelope = defusedxml.ElementTree.fromstring(text)
    except Exception as exc:
        raise XmlParseError("%s" % exc)

    assert envelope.tag == '{%s}Envelope' % soapenv.NAMESPACE
    assert len(envelope) >= 1
    env = {"header": [], "body": None}

    for part in envelope:
        if part.tag == '{%s}Body' % soapenv.NAMESPACE:
            # The body wraps exactly one element.
            assert len(part) == 1
            env["body"] = instanciate_class(part[0], modules)
        elif part.tag == "{%s}Header" % soapenv.NAMESPACE:
            for item in part:
                env["header"].append(instanciate_class(item, modules))

    return env
def open_soap_envelope(text):
    """
    Split a SOAP message into its serialised header and body parts.

    :param text: SOAP message
    :return: dictionary with two keys "body"/"header"
    :raises XmlParseError: if *text* is not well-formed XML
    """
    try:
        # defusedxml protects against XXE/entity-expansion attacks.
        envelope = defusedxml.ElementTree.fromstring(text)
    except Exception as exc:
        raise XmlParseError("%s" % exc)

    assert envelope.tag == '{%s}Envelope' % soapenv.NAMESPACE
    assert len(envelope) >= 1
    content = {"header": [], "body": None}

    for part in envelope:
        if part.tag == '{%s}Body' % soapenv.NAMESPACE:
            assert len(part) == 1
            # Keep the single body child as serialised XML bytes.
            content["body"] = ElementTree.tostring(part[0], encoding="UTF-8")
        elif part.tag == "{%s}Header" % soapenv.NAMESPACE:
            for item in part:
                _str = ElementTree.tostring(item, encoding="UTF-8")
                content["header"].append(_str)

    return content
def make_soap_enveloped_saml_thingy(thingy, headers=None):
    """Wrap a SAML element in a SOAP envelope and serialise it.

    :param thingy: The SAML thingy
    :param headers: optional extension elements for the SOAP header
    :return: The SOAP envelope as a string
    """
    envelope = soapenv.Envelope()

    if headers:
        soap_header = soapenv.Header()
        soap_header.add_extension_elements(headers)
        envelope.header = soap_header

    envelope.body = soapenv.Body()
    envelope.body.add_extension_element(thingy)

    return "%s" % envelope
def soap_fault(message=None, actor=None, code=None, detail=None):
    """Create a SOAP Fault message.

    :param message: Human readable error message
    :param actor: Who discovered the error
    :param code: Error code
    :param detail: More specific error message
    :return: A SOAP Fault message as a string
    """
    # Each part is only instantiated when a truthy value was supplied;
    # absent parts stay None and are omitted from the fault.
    fault = soapenv.Fault(
        faultcode=soapenv.Fault_faultcode(text=code) if code else None,
        faultstring=soapenv.Fault_faultstring(text=message) if message else None,
        faultactor=soapenv.Fault_faultactor(text=actor) if actor else None,
        detail=soapenv.Fault_detail(text=detail) if detail else None,
    )

    return "%s" % fault
| ./CrossVul/dataset_final_sorted/CWE-611/py/good_4805_3 |
crossvul-python_data_good_4805_2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""Contains classes and functions that are necessary to implement
different bindings.
Bindings normally consists of three parts:
- rules about what to send
- how to package the information
- which protocol to use
"""
from six.moves.urllib.parse import urlparse, urlencode
import saml2
import base64
from saml2.s_utils import deflate_and_base64_encode
from saml2.s_utils import Unsupported
import logging
from saml2.sigver import REQ_ORDER
from saml2.sigver import RESP_ORDER
from saml2.sigver import SIGNER_ALGS
import six
from saml2.xmldsig import SIG_ALLOWED_ALG
logger = logging.getLogger(__name__)
try:
from xml.etree import cElementTree as ElementTree
if ElementTree.VERSION < '1.3.0':
# cElementTree has no support for register_namespace
# neither _namespace_map, thus we sacrify performance
# for correctness
from xml.etree import ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
from elementtree import ElementTree
import defusedxml.ElementTree
NAMESPACE = "http://schemas.xmlsoap.org/soap/envelope/"
FORM_SPEC = """<form method="post" action="%s">
<input type="hidden" name="%s" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
<input type="submit" value="Submit" />
</form>"""
def http_form_post_message(message, location, relay_state="",
                           typ="SAMLRequest", **kwargs):
    """Package a message for the HTTP POST binding.

    The SAML message is embedded base64-encoded in a self-submitting
    HTML form targeted at *location*.

    :param message: The message
    :param location: Where the form should be posted to
    :param relay_state: for preserving and conveying state information
    :param typ: name of the form field ("SAMLRequest"/"SAMLResponse"/...)
    :return: A dict with header information and the HTML fragments.
    """
    # Coerce to bytes: stringify non-strings, then utf-8 encode text.
    if not isinstance(message, six.string_types):
        message = str(message)
    if not isinstance(message, six.binary_type):
        message = message.encode('utf-8')

    # Protocol messages travel base64-encoded; anything else goes as-is.
    if typ in ("SAMLRequest", "SAMLResponse"):
        payload = base64.b64encode(message)
    else:
        payload = message
    payload = payload.decode('ascii')

    html = [
        "<head>",
        "<title>SAML 2.0 POST</title>",
        "</head><body>",
        FORM_SPEC % (location, typ, payload, relay_state),
        '<script type="text/javascript">',
        " window.onload = function ()",
        " { document.forms[0].submit(); }",
        "</script>",
        "</body>",
    ]
    return {"headers": [("Content-type", "text/html")], "data": html}
def http_post_message(message, relay_state="", typ="SAMLRequest", **kwargs):
    """Package a message as urlencoded form data for an HTTP POST.

    :param message: The message
    :param relay_state: for preserving and conveying state information
    :param typ: name of the form field carrying the message
    :return: A dict with header information and the urlencoded body.
    """
    if not isinstance(message, six.string_types):
        message = str(message)
    if not isinstance(message, six.binary_type):
        message = message.encode('utf-8')

    # Protocol messages travel base64-encoded; anything else goes as-is.
    if typ in ("SAMLRequest", "SAMLResponse"):
        payload = base64.b64encode(message)
    else:
        payload = message
    payload = payload.decode('ascii')

    form = {typ: payload}
    if relay_state:
        form["RelayState"] = relay_state

    return {"headers": [("Content-type", 'application/x-www-form-urlencoded')],
            "data": urlencode(form)}
def http_redirect_message(message, location, relay_state="", typ="SAMLRequest",
                          sigalg='', signer=None, **kwargs):
    """The HTTP Redirect binding defines a mechanism by which SAML protocol
    messages can be transmitted within URL parameters.
    Messages are encoded for use with this binding using a URL encoding
    technique, and transmitted using the HTTP GET method.

    The DEFLATE Encoding is used in this function.

    :param message: The message
    :param location: Where the message should be posted to
    :param relay_state: for preserving and conveying state information
    :param typ: What type of message it is SAMLRequest/SAMLResponse/SAMLart
    :param sigalg: Which algorithm the signature function will use to sign
        the message
    :param signer: A signature function that can be used to sign the message
    :return: A tuple containing header information and a HTML message.
    """
    if not isinstance(message, six.string_types):
        message = "%s" % (message,)

    _order = None
    if typ in ["SAMLRequest", "SAMLResponse"]:
        if typ == "SAMLRequest":
            _order = REQ_ORDER
        else:
            _order = RESP_ORDER
        # Protocol messages are DEFLATE-compressed then base64-encoded.
        args = {typ: deflate_and_base64_encode(message)}
    elif typ == "SAMLart":
        args = {typ: message}
    else:
        raise Exception("Unknown message type: %s" % typ)

    if relay_state:
        args["RelayState"] = relay_state

    if signer:
        # sigalgs, should be one defined in xmldsig
        assert sigalg in [b for a, b in SIG_ALLOWED_ALG]
        args["SigAlg"] = sigalg

        # The signature covers the query parameters in the exact order
        # mandated by the binding spec (message, RelayState, SigAlg).
        string = "&".join([urlencode({k: args[k]})
                           for k in _order if k in args]).encode('ascii')
        args["Signature"] = base64.b64encode(signer.sign(string))
        string = urlencode(args)
    else:
        string = urlencode(args)

    # Append to any query string already present on the location URL.
    glue_char = "&" if urlparse(location).query else "?"
    login_url = glue_char.join([location, string])
    headers = [('Location', str(login_url))]
    body = []

    return {"headers": headers, "data": body}
# Throw-away namespace used by make_soap_enveloped_saml_thingy() to splice
# pre-serialised XML strings into an envelope via a placeholder element.
DUMMY_NAMESPACE = "http://example.org/"
# XML declaration stripped from string payloads before embedding.
PREFIX = '<?xml version="1.0" encoding="UTF-8"?>'
def make_soap_enveloped_saml_thingy(thingy, header_parts=None):
    """ Returns a soap envelope containing a SAML request
    as a text string.

    String payloads are spliced into the serialised envelope via a
    placeholder "FuddleMuddle" element; element payloads are attached
    directly as children of the SOAP body.

    :param thingy: The SAML thingy (string or element instance)
    :param header_parts: optional elements for the SOAP header
    :return: The SOAP envelope as a string
    """
    envelope = ElementTree.Element('')
    envelope.tag = '{%s}Envelope' % NAMESPACE

    if header_parts:
        header = ElementTree.Element('')
        header.tag = '{%s}Header' % NAMESPACE
        envelope.append(header)
        for part in header_parts:
            # This doesn't work if the headers are signed
            part.become_child_element_of(header)

    body = ElementTree.Element('')
    body.tag = '{%s}Body' % NAMESPACE
    envelope.append(body)

    if isinstance(thingy, six.string_types):
        # remove the first XML version/encoding line
        if thingy[0:5].lower() == '<?xml':
            logger.debug("thingy0: %s", thingy)
            _part = thingy.split("\n")
            thingy = "".join(_part[1:])
        thingy = thingy.replace(PREFIX, "")
        logger.debug("thingy: %s", thingy)
        # Insert a placeholder child, serialise the envelope, then
        # textually replace the placeholder with the payload string.
        _child = ElementTree.Element('')
        _child.tag = '{%s}FuddleMuddle' % DUMMY_NAMESPACE
        body.append(_child)
        _str = ElementTree.tostring(envelope, encoding="UTF-8")
        if isinstance(_str, six.binary_type):
            _str = _str.decode('utf-8')
        logger.debug("SOAP precursor: %s", _str)
        # find and remove the dummy namespace definition
        i = _str.find(DUMMY_NAMESPACE)
        j = _str.rfind("xmlns:", 0, i)
        cut1 = _str[j:i + len(DUMMY_NAMESPACE) + 1]
        _str = _str.replace(cut1, "")
        # cut1[6:9] is the namespace prefix ElementTree generated.
        first = _str.find("<%s:FuddleMuddle" % (cut1[6:9],))
        last = _str.find(">", first + 14)
        cut2 = _str[first:last + 1]
        return _str.replace(cut2, thingy)
    else:
        thingy.become_child_element_of(body)
        return ElementTree.tostring(envelope, encoding="UTF-8")
def http_soap_message(message):
    """Package *message* for the SOAP binding (headers + SOAP envelope)."""
    return {"headers": [("Content-type", "application/soap+xml")],
            "data": make_soap_enveloped_saml_thingy(message)}


def http_paos(message, extra=None):
    """Package *message* for the PAOS binding; *extra* becomes SOAP headers."""
    return {"headers": [("Content-type", "application/soap+xml")],
            "data": make_soap_enveloped_saml_thingy(message, extra)}
def parse_soap_enveloped_saml(text, body_class, header_class=None):
    """Parses a SOAP enveloped SAML thing and returns header parts and body.

    :param text: The SOAP object as XML
    :param body_class: schema class expected for the body element
    :param header_class: iterable of schema classes allowed in the header
    :return: header parts and body as saml.samlbase instances
    """
    # defusedxml protects against XXE/entity-expansion attacks.
    envelope = defusedxml.ElementTree.fromstring(text)
    assert envelope.tag == '{%s}Envelope' % NAMESPACE

    body = None
    header = {}
    for part in envelope:
        if part.tag == '{%s}Body' % NAMESPACE:
            for sub in part:
                try:
                    body = saml2.create_class_from_element_tree(body_class, sub)
                except Exception:
                    raise Exception(
                        "Wrong body type (%s) in SOAP envelope" % sub.tag)
        elif part.tag == '{%s}Header' % NAMESPACE:
            if not header_class:
                raise Exception("Header where I didn't expect one")
            for sub in part:
                # Keep only header elements whose qualified tag matches
                # one of the allowed classes.
                for klass in header_class:
                    if sub.tag == "{%s}%s" % (klass.c_namespace, klass.c_tag):
                        header[sub.tag] = \
                            saml2.create_class_from_element_tree(klass, sub)
                        break

    return body, header
# -----------------------------------------------------------------------------
# Maps each supported binding identifier to its packaging function.
PACKING = {
    saml2.BINDING_HTTP_REDIRECT: http_redirect_message,
    saml2.BINDING_HTTP_POST: http_form_post_message,
}


def packager(identifier):
    """Return the packing function for a binding identifier."""
    try:
        return PACKING[identifier]
    except KeyError:
        raise Exception("Unknown binding type: %s" % identifier)


def factory(binding, message, location, relay_state="", typ="SAMLRequest",
            **kwargs):
    """Package *message* for *binding* in a single call."""
    return PACKING[binding](message, location, relay_state, typ, **kwargs)
| ./CrossVul/dataset_final_sorted/CWE-611/py/good_4805_2 |
crossvul-python_data_bad_576_3 | #!/usr/bin/python
from k5test import *
import time
from itertools import imap
# Run kdbtest against the BDB module.
realm = K5Realm(create_kdb=False)
realm.run(['./kdbtest'])
# Set up an OpenLDAP test server if we can.
if (not os.path.exists(os.path.join(plugins, 'kdb', 'kldap.so')) and
not os.path.exists(os.path.join(buildtop, 'lib', 'libkdb_ldap.a'))):
skip_rest('LDAP KDB tests', 'LDAP KDB module not built')
if 'SLAPD' not in os.environ and not which('slapd'):
skip_rest('LDAP KDB tests', 'slapd not found')
slapadd = which('slapadd')
if not slapadd:
skip_rest('LDAP KDB tests', 'slapadd not found')
ldapdir = os.path.abspath('ldap')
dbdir = os.path.join(ldapdir, 'ldap')
slapd_conf = os.path.join(ldapdir, 'slapd.d')
slapd_out = os.path.join(ldapdir, 'slapd.out')
slapd_pidfile = os.path.join(ldapdir, 'pid')
ldap_pwfile = os.path.join(ldapdir, 'pw')
ldap_sock = os.path.join(ldapdir, 'sock')
ldap_uri = 'ldapi://%s/' % ldap_sock.replace(os.path.sep, '%2F')
schema = os.path.join(srctop, 'plugins', 'kdb', 'ldap', 'libkdb_ldap',
'kerberos.openldap.ldif')
top_dn = 'cn=krb5'
admin_dn = 'cn=admin,cn=krb5'
admin_pw = 'admin'
shutil.rmtree(ldapdir, True)
os.mkdir(ldapdir)
os.mkdir(slapd_conf)
os.mkdir(dbdir)
if 'SLAPD' in os.environ:
slapd = os.environ['SLAPD']
else:
# Some Linux installations have AppArmor or similar restrictions
# on the slapd binary, which would prevent it from accessing the
# build directory. Try to defeat this by copying the binary.
system_slapd = which('slapd')
slapd = os.path.join(ldapdir, 'slapd')
shutil.copy(system_slapd, slapd)
def slap_add(ldif):
    """Feed *ldif* to slapadd against the test server's cn=config database.

    :param ldif: LDIF text to add
    :return: slapadd's exit status (0 on success)
    """
    proc = subprocess.Popen([slapadd, '-b', 'cn=config', '-F', slapd_conf],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    (out, dummy) = proc.communicate(ldif)
    output(out)
    return proc.wait()
# Configure the pid file and some authorization rules we will need for
# SASL testing.
if slap_add('dn: cn=config\n'
'objectClass: olcGlobal\n'
'olcPidFile: %s\n'
'olcAuthzRegexp: '
'".*uidNumber=%d,cn=peercred,cn=external,cn=auth" "%s"\n'
'olcAuthzRegexp: "uid=digestuser,cn=digest-md5,cn=auth" "%s"\n' %
(slapd_pidfile, os.geteuid(), admin_dn, admin_dn)) != 0:
skip_rest('LDAP KDB tests', 'slapd basic configuration failed')
# Find a working writable database type, trying mdb (added in OpenLDAP
# 2.4.27) and bdb (deprecated and sometimes not built due to licensing
# incompatibilities).
for dbtype in ('mdb', 'bdb'):
# Try to load the module. This could fail if OpenLDAP is built
# without module support, so ignore errors.
slap_add('dn: cn=module,cn=config\n'
'objectClass: olcModuleList\n'
'olcModuleLoad: back_%s\n' % dbtype)
dbclass = 'olc%sConfig' % dbtype.capitalize()
if slap_add('dn: olcDatabase=%s,cn=config\n'
'objectClass: olcDatabaseConfig\n'
'objectClass: %s\n'
'olcSuffix: %s\n'
'olcRootDN: %s\n'
'olcRootPW: %s\n'
'olcDbDirectory: %s\n' %
(dbtype, dbclass, top_dn, admin_dn, admin_pw, dbdir)) == 0:
break
else:
skip_rest('LDAP KDB tests', 'could not find working slapd db type')
if slap_add('include: file://%s\n' % schema) != 0:
skip_rest('LDAP KDB tests', 'failed to load Kerberos schema')
# Load the core schema if we can.
ldap_homes = ['/etc/ldap', '/etc/openldap', '/usr/local/etc/openldap',
'/usr/local/etc/ldap']
local_schema_path = '/schema/core.ldif'
core_schema = next((i for i in imap(lambda x:x+local_schema_path, ldap_homes)
if os.path.isfile(i)), None)
if core_schema:
if slap_add('include: file://%s\n' % core_schema) != 0:
core_schema = None
slapd_pid = -1
def kill_slapd():
    """atexit hook: terminate the detached slapd, at most once."""
    global slapd_pid
    if slapd_pid != -1:
        os.kill(slapd_pid, signal.SIGTERM)
        # Reset so a second invocation is a no-op.
        slapd_pid = -1
atexit.register(kill_slapd)
out = open(slapd_out, 'w')
subprocess.call([slapd, '-h', ldap_uri, '-F', slapd_conf], stdout=out,
stderr=out)
out.close()
pidf = open(slapd_pidfile, 'r')
slapd_pid = int(pidf.read())
pidf.close()
output('*** Started slapd (pid %d, output in %s)\n' % (slapd_pid, slapd_out))
# slapd detaches before it finishes setting up its listener sockets
# (they are bound but listen() has not been called). Give it a second
# to finish.
time.sleep(1)
# Run kdbtest against the LDAP module.
conf = {'realms': {'$realm': {'database_module': 'ldap'}},
'dbmodules': {'ldap': {'db_library': 'kldap',
'ldap_kerberos_container_dn': top_dn,
'ldap_kdc_dn': admin_dn,
'ldap_kadmind_dn': admin_dn,
'ldap_service_password_file': ldap_pwfile,
'ldap_servers': ldap_uri}}}
realm = K5Realm(create_kdb=False, kdc_conf=conf)
input = admin_pw + '\n' + admin_pw + '\n'
realm.run([kdb5_ldap_util, 'stashsrvpw', admin_dn], input=input)
realm.run(['./kdbtest'])
# Run a kdb5_ldap_util command using the test server's admin DN and password.
def kldaputil(args, **kw):
    """Run kdb5_ldap_util with the test server's admin DN and password."""
    return realm.run([kdb5_ldap_util, '-D', admin_dn, '-w', admin_pw] + args,
                     **kw)
# kdbtest can't currently clean up after itself since the LDAP module
# doesn't support krb5_db_destroy. So clean up after it with
# kdb5_ldap_util before proceeding.
kldaputil(['destroy', '-f'])
ldapmodify = which('ldapmodify')
ldapsearch = which('ldapsearch')
if not ldapmodify or not ldapsearch:
skip_rest('some LDAP KDB tests', 'ldapmodify or ldapsearch not found')
def ldap_search(args):
    """Run ldapsearch under top_dn as the admin and return its output.

    :param args: a single LDAP filter string
    """
    proc = subprocess.Popen([ldapsearch, '-H', ldap_uri, '-b', top_dn,
                             '-D', admin_dn, '-w', admin_pw, args],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    (out, dummy) = proc.communicate()
    return out
def ldap_modify(ldif, args=None):
    """Feed *ldif* to ldapmodify as the admin user.

    :param ldif: LDIF text describing the modification
    :param args: optional extra ldapmodify arguments (e.g. ['-a'])
    """
    # args=None instead of the original mutable default args=[].
    cmd = [ldapmodify, '-H', ldap_uri, '-D', admin_dn,
           '-x', '-w', admin_pw] + (args or [])
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    (out, dummy) = proc.communicate(ldif)
    output(out)
def ldap_add(dn, objectclass, attrs=()):
    """Add an entry via ldapmodify -a.

    :param dn: distinguished name of the new entry
    :param objectclass: objectClass value for the entry
    :param attrs: optional iterable of extra "attr: value" LDIF lines
    """
    # Tuple default avoids the shared-mutable-default pitfall of attrs=[].
    in_data = 'dn: %s\nobjectclass: %s\n' % (dn, objectclass)
    in_data += '\n'.join(attrs) + '\n'
    ldap_modify(in_data, ['-a'])
# Create krbContainer objects for use as subtrees.
ldap_add('cn=t1,cn=krb5', 'krbContainer')
ldap_add('cn=t2,cn=krb5', 'krbContainer')
ldap_add('cn=x,cn=t1,cn=krb5', 'krbContainer')
ldap_add('cn=y,cn=t2,cn=krb5', 'krbContainer')
# Create a realm, exercising all of the realm options.
kldaputil(['create', '-s', '-P', 'master', '-subtrees', 'cn=t2,cn=krb5',
'-containerref', 'cn=t2,cn=krb5', '-sscope', 'one',
'-maxtktlife', '5min', '-maxrenewlife', '10min', '-allow_svr'])
# Modify the realm, exercising overlapping subtree pruning.
kldaputil(['modify', '-subtrees',
'cn=x,cn=t1,cn=krb5:cn=t1,cn=krb5:cn=t2,cn=krb5:cn=y,cn=t2,cn=krb5',
'-containerref', 'cn=t1,cn=krb5', '-sscope', 'sub',
'-maxtktlife', '5hour', '-maxrenewlife', '10hour', '+allow_svr'])
out = kldaputil(['list'])
if out != 'KRBTEST.COM\n':
fail('Unexpected kdb5_ldap_util list output')
# Create a principal at a specified DN. This is a little dodgy
# because we're sticking a krbPrincipalAux objectclass onto a subtree
# krbContainer, but it works and it avoids having to load core.schema
# in the test LDAP server.
realm.run([kadminl, 'ank', '-randkey', '-x', 'dn=cn=krb5', 'princ1'],
expected_code=1, expected_msg='DN is out of the realm subtree')
realm.run([kadminl, 'ank', '-randkey', '-x', 'dn=cn=t2,cn=krb5', 'princ1'])
realm.run([kadminl, 'getprinc', 'princ1'], expected_msg='Principal: princ1')
realm.run([kadminl, 'ank', '-randkey', '-x', 'dn=cn=t2,cn=krb5', 'again'],
expected_code=1, expected_msg='ldap object is already kerberized')
# Check that we can't set linkdn on a non-standalone object.
realm.run([kadminl, 'modprinc', '-x', 'linkdn=cn=t1,cn=krb5', 'princ1'],
expected_code=1, expected_msg='link information can not be set')
# Create a principal with a specified linkdn.
realm.run([kadminl, 'ank', '-randkey', '-x', 'linkdn=cn=krb5', 'princ2'],
expected_code=1, expected_msg='DN is out of the realm subtree')
realm.run([kadminl, 'ank', '-randkey', '-x', 'linkdn=cn=t1,cn=krb5', 'princ2'])
# Check that we can't reset linkdn.
realm.run([kadminl, 'modprinc', '-x', 'linkdn=cn=t2,cn=krb5', 'princ2'],
expected_code=1, expected_msg='kerberos principal is already linked')
# Create a principal with a specified containerdn.
realm.run([kadminl, 'ank', '-randkey', '-x', 'containerdn=cn=krb5', 'princ3'],
expected_code=1, expected_msg='DN is out of the realm subtree')
realm.run([kadminl, 'ank', '-randkey', '-x', 'containerdn=cn=t1,cn=krb5',
'princ3'])
realm.run([kadminl, 'modprinc', '-x', 'containerdn=cn=t2,cn=krb5', 'princ3'],
expected_code=1, expected_msg='containerdn option not supported')
# Create and modify a ticket policy.
kldaputil(['create_policy', '-maxtktlife', '3hour', '-maxrenewlife', '6hour',
'-allow_forwardable', 'tktpol'])
kldaputil(['modify_policy', '-maxtktlife', '4hour', '-maxrenewlife', '8hour',
'+requires_preauth', 'tktpol'])
out = kldaputil(['view_policy', 'tktpol'])
if ('Ticket policy: tktpol\n' not in out or
'Maximum ticket life: 0 days 04:00:00\n' not in out or
'Maximum renewable life: 0 days 08:00:00\n' not in out or
'Ticket flags: DISALLOW_FORWARDABLE REQUIRES_PRE_AUTH' not in out):
fail('Unexpected kdb5_ldap_util view_policy output')
out = kldaputil(['list_policy'])
if out != 'tktpol\n':
fail('Unexpected kdb5_ldap_util list_policy output')
# Associate the ticket policy to a principal.
realm.run([kadminl, 'ank', '-randkey', '-x', 'tktpolicy=tktpol', 'princ4'])
out = realm.run([kadminl, 'getprinc', 'princ4'])
if ('Maximum ticket life: 0 days 04:00:00\n' not in out or
'Maximum renewable life: 0 days 08:00:00\n' not in out or
'Attributes: DISALLOW_FORWARDABLE REQUIRES_PRE_AUTH\n' not in out):
fail('Unexpected getprinc output with ticket policy')
# Destroying the policy should fail while a principal references it.
kldaputil(['destroy_policy', '-force', 'tktpol'], expected_code=1)
# Dissociate the ticket policy from the principal.
realm.run([kadminl, 'modprinc', '-x', 'tktpolicy=', 'princ4'])
out = realm.run([kadminl, 'getprinc', 'princ4'])
if ('Maximum ticket life: 0 days 05:00:00\n' not in out or
'Maximum renewable life: 0 days 10:00:00\n' not in out or
'Attributes:\n' not in out):
fail('Unexpected getprinc output without ticket policy')
# Destroy the ticket policy.
kldaputil(['destroy_policy', '-force', 'tktpol'])
kldaputil(['view_policy', 'tktpol'], expected_code=1)
out = kldaputil(['list_policy'])
if out:
fail('Unexpected kdb5_ldap_util list_policy output after destroy')
# Create another ticket policy to be destroyed with the realm.
kldaputil(['create_policy', 'tktpol2'])
# Try to create a password policy conflicting with a ticket policy.
realm.run([kadminl, 'addpol', 'tktpol2'], expected_code=1,
expected_msg='Already exists while creating policy "tktpol2"')
# Try to create a ticket policy conflicting with a password policy.
realm.run([kadminl, 'addpol', 'pwpol'])
out = kldaputil(['create_policy', 'pwpol'], expected_code=1)
if 'Already exists while creating policy object' not in out:
fail('Expected error not seen in kdb5_ldap_util output')
# Try to use a password policy as a ticket policy.
realm.run([kadminl, 'modprinc', '-x', 'tktpolicy=pwpol', 'princ4'],
expected_code=1, expected_msg='Object class violation')
# Use a ticket policy as a password policy (CVE-2014-5353). This
# works with a warning; use kadmin.local -q so the warning is shown.
realm.run([kadminl, '-q', 'modprinc -policy tktpol2 princ4'],
expected_msg='WARNING: policy "tktpol2" does not exist')
# Do some basic tests with a KDC against the LDAP module, exercising the
# db_args processing code.
realm.start_kdc(['-x', 'nconns=3', '-x', 'host=' + ldap_uri,
'-x', 'binddn=' + admin_dn, '-x', 'bindpwd=' + admin_pw])
realm.addprinc(realm.user_princ, password('user'))
realm.addprinc(realm.host_princ)
realm.extract_keytab(realm.host_princ, realm.keytab)
realm.kinit(realm.user_princ, password('user'))
realm.run([kvno, realm.host_princ])
realm.klist(realm.user_princ, realm.host_princ)
# Test auth indicator support
realm.addprinc('authind', password('authind'))
realm.run([kadminl, 'setstr', 'authind', 'require_auth', 'otp radius'])
out = ldap_search('(krbPrincipalName=authind*)')
if 'krbPrincipalAuthInd: otp' not in out:
fail('Expected krbPrincipalAuthInd value not in output')
if 'krbPrincipalAuthInd: radius' not in out:
fail('Expected krbPrincipalAuthInd value not in output')
realm.run([kadminl, 'getstrs', 'authind'],
expected_msg='require_auth: otp radius')
# Test service principal aliases.
realm.addprinc('canon', password('canon'))
ldap_modify('dn: krbPrincipalName=canon@KRBTEST.COM,cn=t1,cn=krb5\n'
'changetype: modify\n'
'add: krbPrincipalName\n'
'krbPrincipalName: alias@KRBTEST.COM\n'
'-\n'
'add: krbCanonicalName\n'
'krbCanonicalName: canon@KRBTEST.COM\n')
realm.run([kadminl, 'getprinc', 'alias'],
expected_msg='Principal: canon@KRBTEST.COM\n')
realm.run([kadminl, 'getprinc', 'canon'],
expected_msg='Principal: canon@KRBTEST.COM\n')
realm.run([kvno, 'alias', 'canon'])
out = realm.run([klist])
if 'alias@KRBTEST.COM\n' not in out or 'canon@KRBTEST.COM' not in out:
fail('After fetching alias and canon, klist is missing one or both')
realm.kinit(realm.user_princ, password('user'), ['-S', 'alias'])
realm.klist(realm.user_princ, 'alias@KRBTEST.COM')
# Make sure an alias to the local TGS is still treated like an alias.
ldap_modify('dn: krbPrincipalName=krbtgt/KRBTEST.COM@KRBTEST.COM,'
'cn=KRBTEST.COM,cn=krb5\n'
'changetype: modify\n'
'add:krbPrincipalName\n'
'krbPrincipalName: tgtalias@KRBTEST.COM\n'
'-\n'
'add: krbCanonicalName\n'
'krbCanonicalName: krbtgt/KRBTEST.COM@KRBTEST.COM\n')
realm.run([kadminl, 'getprinc', 'tgtalias'],
expected_msg='Principal: krbtgt/KRBTEST.COM@KRBTEST.COM')
realm.kinit(realm.user_princ, password('user'))
realm.run([kvno, 'tgtalias'])
realm.klist(realm.user_princ, 'tgtalias@KRBTEST.COM')
# Make sure aliases work in header tickets.
realm.run([kadminl, 'modprinc', '-maxrenewlife', '3 hours', 'user'])
realm.run([kadminl, 'modprinc', '-maxrenewlife', '3 hours',
'krbtgt/KRBTEST.COM'])
realm.kinit(realm.user_princ, password('user'), ['-l', '1h', '-r', '2h'])
realm.run([kvno, 'alias'])
realm.kinit(realm.user_princ, flags=['-R', '-S', 'alias'])
realm.klist(realm.user_princ, 'alias@KRBTEST.COM')
# Test client principal aliases, with and without preauth.
realm.kinit('canon', password('canon'))
realm.kinit('alias', password('canon'), expected_code=1,
expected_msg='not found in Kerberos database')
realm.kinit('alias', password('canon'), ['-C'])
realm.run([kvno, 'alias'])
realm.klist('canon@KRBTEST.COM', 'alias@KRBTEST.COM')
realm.run([kadminl, 'modprinc', '+requires_preauth', 'canon'])
realm.kinit('canon', password('canon'))
realm.kinit('alias', password('canon'), ['-C'])
# Test password history.
def test_pwhist(nhist):
    """Exercise a password history policy of size nhist.

    Creates a fresh policy and principal, cycles through nhist distinct
    passwords verifying every prior password is rejected, then confirms
    the oldest entry rotates out once the history is full.
    """
    princ = 'pwhistprinc' + str(nhist)
    pol = 'pwhistpol' + str(nhist)

    def cpw(n, **kwargs):
        # Change the principal's password to the decimal string for n.
        realm.run([kadminl, 'cpw', '-pw', str(n), princ], **kwargs)

    def cpw_fail(n):
        # A password still present in the history must be refused.
        cpw(n, expected_code=1)

    output('*** Testing password history of size %d\n' % nhist)
    realm.run([kadminl, 'addpol', '-history', str(nhist), pol])
    realm.run([kadminl, 'addprinc', '-policy', pol, '-nokey', princ])
    for idx in range(nhist):
        # Set a password, then check that all previous passwords fail.
        cpw(idx)
        for prev in range(idx + 1):
            cpw_fail(prev)
    # Set one more new password, and make sure the oldest key is
    # rotated out.
    cpw(nhist)
    cpw_fail(1)
    cpw(0)
for n in (1, 2, 3, 4, 5):
test_pwhist(n)
# Regression test for #8193: test password character class requirements.
princ = 'charclassprinc'
pol = 'charclasspol'
realm.run([kadminl, 'addpol', '-minclasses', '3', pol])
realm.run([kadminl, 'addprinc', '-policy', pol, '-nokey', princ])
realm.run([kadminl, 'cpw', '-pw', 'abcdef', princ], expected_code=1)
realm.run([kadminl, 'cpw', '-pw', 'Abcdef', princ], expected_code=1)
realm.run([kadminl, 'cpw', '-pw', 'Abcdef1', princ])
# Test principal renaming and make sure last modified is changed
def get_princ(princ):
    """Return kadmin.local getprinc output for princ as a field->value dict.

    Each output line is split on the first ':'; both halves are stripped.
    """
    text = realm.run([kadminl, 'getprinc', princ])
    fields = {}
    for line in text.splitlines():
        name, value = line.split(":", 1)
        fields[name.strip()] = value.strip()
    return fields
realm.addprinc("rename", password('rename'))
renameprinc = get_princ("rename")
realm.run([kadminl, '-p', 'fake@KRBTEST.COM', 'renprinc', 'rename', 'renamed'])
renamedprinc = get_princ("renamed")
if renameprinc['Last modified'] == renamedprinc['Last modified']:
fail('Last modified data not updated when principal was renamed')
# Regression test for #7980 (fencepost when dividing keys up by kvno).
realm.run([kadminl, 'addprinc', '-randkey', '-e', 'aes256-cts,aes128-cts',
'kvnoprinc'])
realm.run([kadminl, 'cpw', '-randkey', '-keepold', '-e',
'aes256-cts,aes128-cts', 'kvnoprinc'])
realm.run([kadminl, 'getprinc', 'kvnoprinc'], expected_msg='Number of keys: 4')
realm.run([kadminl, 'cpw', '-randkey', '-keepold', '-e',
'aes256-cts,aes128-cts', 'kvnoprinc'])
realm.run([kadminl, 'getprinc', 'kvnoprinc'], expected_msg='Number of keys: 6')
# Regression test for #8041 (NULL dereference on keyless principals).
realm.run([kadminl, 'addprinc', '-nokey', 'keylessprinc'])
realm.run([kadminl, 'getprinc', 'keylessprinc'],
expected_msg='Number of keys: 0')
realm.run([kadminl, 'cpw', '-randkey', '-e', 'aes256-cts,aes128-cts',
'keylessprinc'])
realm.run([kadminl, 'cpw', '-randkey', '-keepold', '-e',
'aes256-cts,aes128-cts', 'keylessprinc'])
realm.run([kadminl, 'getprinc', 'keylessprinc'],
expected_msg='Number of keys: 4')
realm.run([kadminl, 'purgekeys', '-all', 'keylessprinc'])
realm.run([kadminl, 'getprinc', 'keylessprinc'],
expected_msg='Number of keys: 0')
# Test for 8354 (old password history entries when -keepold is used)
realm.run([kadminl, 'addpol', '-history', '2', 'keepoldpasspol'])
realm.run([kadminl, 'addprinc', '-policy', 'keepoldpasspol', '-pw', 'aaaa',
'keepoldpassprinc'])
for p in ('bbbb', 'cccc', 'aaaa'):
realm.run([kadminl, 'cpw', '-keepold', '-pw', p, 'keepoldpassprinc'])
if runenv.sizeof_time_t <= 4:
skipped('y2038 LDAP test', 'platform has 32-bit time_t')
else:
# Test storage of timestamps after y2038.
realm.run([kadminl, 'modprinc', '-pwexpire', '2040-02-03', 'user'])
realm.run([kadminl, 'getprinc', 'user'], expected_msg=' 2040\n')
realm.stop()
# Briefly test dump and load.
dumpfile = os.path.join(realm.testdir, 'dump')
realm.run([kdb5_util, 'dump', dumpfile])
realm.run([kdb5_util, 'load', dumpfile], expected_code=1,
expected_msg='KDB module requires -update argument')
realm.run([kdb5_util, 'load', '-update', dumpfile])
# Destroy the realm.
kldaputil(['destroy', '-f'])
out = kldaputil(['list'])
if out:
fail('Unexpected kdb5_ldap_util list output after destroy')
if not core_schema:
skip_rest('LDAP SASL tests', 'core schema not found')
if runenv.have_sasl != 'yes':
skip_rest('LDAP SASL tests', 'SASL support not built')
# Test SASL EXTERNAL auth. Remove the DNs and service password file
# from the DB module config.
os.remove(ldap_pwfile)
dbmod = conf['dbmodules']['ldap']
dbmod['ldap_kdc_sasl_mech'] = dbmod['ldap_kadmind_sasl_mech'] = 'EXTERNAL'
del dbmod['ldap_service_password_file']
del dbmod['ldap_kdc_dn'], dbmod['ldap_kadmind_dn']
realm = K5Realm(create_kdb=False, kdc_conf=conf)
realm.run([kdb5_ldap_util, 'create', '-s', '-P', 'master'])
realm.start_kdc()
realm.addprinc(realm.user_princ, password('user'))
realm.kinit(realm.user_princ, password('user'))
realm.stop()
realm.run([kdb5_ldap_util, 'destroy', '-f'])
# Test SASL DIGEST-MD5 auth. We need to set a clear-text password for
# the admin DN, so create a person entry (requires the core schema).
# Restore the service password file in the config and set authcids.
ldap_add('cn=admin,cn=krb5', 'person',
['sn: dummy', 'userPassword: admin'])
dbmod['ldap_kdc_sasl_mech'] = dbmod['ldap_kadmind_sasl_mech'] = 'DIGEST-MD5'
dbmod['ldap_kdc_sasl_authcid'] = 'digestuser'
dbmod['ldap_kadmind_sasl_authcid'] = 'digestuser'
dbmod['ldap_service_password_file'] = ldap_pwfile
realm = K5Realm(create_kdb=False, kdc_conf=conf)
input = admin_pw + '\n' + admin_pw + '\n'
realm.run([kdb5_ldap_util, 'stashsrvpw', 'digestuser'], input=input)
realm.run([kdb5_ldap_util, 'create', '-s', '-P', 'master'])
realm.start_kdc()
realm.addprinc(realm.user_princ, password('user'))
realm.kinit(realm.user_princ, password('user'))
realm.stop()
# Exercise DB options, which should cause binding to fail.
realm.run([kadminl, '-x', 'sasl_authcid=ab', 'getprinc', 'user'],
expected_code=1, expected_msg='Cannot bind to LDAP server')
realm.run([kadminl, '-x', 'bindpwd=wrong', 'getprinc', 'user'],
expected_code=1, expected_msg='Cannot bind to LDAP server')
realm.run([kdb5_ldap_util, 'destroy', '-f'])
# We could still use tests to exercise:
# * DB arg handling in krb5_ldap_create
# * krbAllowedToDelegateTo attribute processing
# * A load operation overwriting a standalone principal entry which
# already exists but doesn't have a krbPrincipalName attribute
# matching the principal name.
# * A bunch of invalid-input error conditions
#
# There is no coverage for the following because it would be difficult:
# * Out-of-memory error conditions
# * Handling of failures from slapd (including krb5_retry_get_ldap_handle)
# * Handling of servers which don't support mod-increment
# * krb5_ldap_delete_krbcontainer (only happens if krb5_ldap_create fails)
success('LDAP and DB2 KDB tests')
| ./CrossVul/dataset_final_sorted/CWE-476/py/bad_576_3 |
crossvul-python_data_good_576_3 | #!/usr/bin/python
from k5test import *
import time
from itertools import imap
# Run kdbtest against the BDB module.
realm = K5Realm(create_kdb=False)
realm.run(['./kdbtest'])
# Set up an OpenLDAP test server if we can.
if (not os.path.exists(os.path.join(plugins, 'kdb', 'kldap.so')) and
not os.path.exists(os.path.join(buildtop, 'lib', 'libkdb_ldap.a'))):
skip_rest('LDAP KDB tests', 'LDAP KDB module not built')
if 'SLAPD' not in os.environ and not which('slapd'):
skip_rest('LDAP KDB tests', 'slapd not found')
slapadd = which('slapadd')
if not slapadd:
skip_rest('LDAP KDB tests', 'slapadd not found')
ldapdir = os.path.abspath('ldap')
dbdir = os.path.join(ldapdir, 'ldap')
slapd_conf = os.path.join(ldapdir, 'slapd.d')
slapd_out = os.path.join(ldapdir, 'slapd.out')
slapd_pidfile = os.path.join(ldapdir, 'pid')
ldap_pwfile = os.path.join(ldapdir, 'pw')
ldap_sock = os.path.join(ldapdir, 'sock')
ldap_uri = 'ldapi://%s/' % ldap_sock.replace(os.path.sep, '%2F')
schema = os.path.join(srctop, 'plugins', 'kdb', 'ldap', 'libkdb_ldap',
'kerberos.openldap.ldif')
top_dn = 'cn=krb5'
admin_dn = 'cn=admin,cn=krb5'
admin_pw = 'admin'
shutil.rmtree(ldapdir, True)
os.mkdir(ldapdir)
os.mkdir(slapd_conf)
os.mkdir(dbdir)
if 'SLAPD' in os.environ:
slapd = os.environ['SLAPD']
else:
# Some Linux installations have AppArmor or similar restrictions
# on the slapd binary, which would prevent it from accessing the
# build directory. Try to defeat this by copying the binary.
system_slapd = which('slapd')
slapd = os.path.join(ldapdir, 'slapd')
shutil.copy(system_slapd, slapd)
def slap_add(ldif):
    """Feed ldif text to slapadd against the test config; return exit status.

    stdout and stderr are merged and echoed through output() for the
    test log.
    """
    cmd = [slapadd, '-b', 'cn=config', '-F', slapd_conf]
    adder = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    stdout_data, _ = adder.communicate(ldif)
    output(stdout_data)
    return adder.wait()
# Configure the pid file and some authorization rules we will need for
# SASL testing.
if slap_add('dn: cn=config\n'
'objectClass: olcGlobal\n'
'olcPidFile: %s\n'
'olcAuthzRegexp: '
'".*uidNumber=%d,cn=peercred,cn=external,cn=auth" "%s"\n'
'olcAuthzRegexp: "uid=digestuser,cn=digest-md5,cn=auth" "%s"\n' %
(slapd_pidfile, os.geteuid(), admin_dn, admin_dn)) != 0:
skip_rest('LDAP KDB tests', 'slapd basic configuration failed')
# Find a working writable database type, trying mdb (added in OpenLDAP
# 2.4.27) and bdb (deprecated and sometimes not built due to licensing
# incompatibilities).
for dbtype in ('mdb', 'bdb'):
# Try to load the module. This could fail if OpenLDAP is built
# without module support, so ignore errors.
slap_add('dn: cn=module,cn=config\n'
'objectClass: olcModuleList\n'
'olcModuleLoad: back_%s\n' % dbtype)
dbclass = 'olc%sConfig' % dbtype.capitalize()
if slap_add('dn: olcDatabase=%s,cn=config\n'
'objectClass: olcDatabaseConfig\n'
'objectClass: %s\n'
'olcSuffix: %s\n'
'olcRootDN: %s\n'
'olcRootPW: %s\n'
'olcDbDirectory: %s\n' %
(dbtype, dbclass, top_dn, admin_dn, admin_pw, dbdir)) == 0:
break
else:
skip_rest('LDAP KDB tests', 'could not find working slapd db type')
if slap_add('include: file://%s\n' % schema) != 0:
skip_rest('LDAP KDB tests', 'failed to load Kerberos schema')
# Load the core schema if we can. Probe the usual OpenLDAP config
# directories; core_schema is the first existing core.ldif, else None.
ldap_homes = ['/etc/ldap', '/etc/openldap', '/usr/local/etc/openldap',
              '/usr/local/etc/ldap']
local_schema_path = '/schema/core.ldif'
# A plain generator expression works on both Python 2 and 3, unlike
# itertools.imap (which was removed in Python 3).
core_schema = next((path for path in
                    (home + local_schema_path for home in ldap_homes)
                    if os.path.isfile(path)), None)
if core_schema:
if slap_add('include: file://%s\n' % core_schema) != 0:
core_schema = None
slapd_pid = -1
def kill_slapd():
    """Terminate the test slapd if it is still running (atexit hook).

    slapd_pid is the module-global pid, or -1 once slapd has been
    stopped; reset it so a second call is a no-op.
    """
    global slapd_pid
    if slapd_pid == -1:
        return
    os.kill(slapd_pid, signal.SIGTERM)
    slapd_pid = -1
atexit.register(kill_slapd)
out = open(slapd_out, 'w')
subprocess.call([slapd, '-h', ldap_uri, '-F', slapd_conf], stdout=out,
stderr=out)
out.close()
pidf = open(slapd_pidfile, 'r')
slapd_pid = int(pidf.read())
pidf.close()
output('*** Started slapd (pid %d, output in %s)\n' % (slapd_pid, slapd_out))
# slapd detaches before it finishes setting up its listener sockets
# (they are bound but listen() has not been called). Give it a second
# to finish.
time.sleep(1)
# Run kdbtest against the LDAP module.
conf = {'realms': {'$realm': {'database_module': 'ldap'}},
'dbmodules': {'ldap': {'db_library': 'kldap',
'ldap_kerberos_container_dn': top_dn,
'ldap_kdc_dn': admin_dn,
'ldap_kadmind_dn': admin_dn,
'ldap_service_password_file': ldap_pwfile,
'ldap_servers': ldap_uri}}}
realm = K5Realm(create_kdb=False, kdc_conf=conf)
input = admin_pw + '\n' + admin_pw + '\n'
realm.run([kdb5_ldap_util, 'stashsrvpw', admin_dn], input=input)
realm.run(['./kdbtest'])
# Run a kdb5_ldap_util command using the test server's admin DN and password.
def kldaputil(args, **kw):
    """Run kdb5_ldap_util bound as the test admin DN, appending args.

    Extra keyword arguments (expected_code, expected_msg, ...) are
    passed straight through to realm.run.
    """
    cmd = [kdb5_ldap_util, '-D', admin_dn, '-w', admin_pw]
    return realm.run(cmd + args, **kw)
# kdbtest can't currently clean up after itself since the LDAP module
# doesn't support krb5_db_destroy. So clean up after it with
# kdb5_ldap_util before proceeding.
kldaputil(['destroy', '-f'])
ldapmodify = which('ldapmodify')
ldapsearch = which('ldapsearch')
if not ldapmodify or not ldapsearch:
skip_rest('some LDAP KDB tests', 'ldapmodify or ldapsearch not found')
def ldap_search(args):
    """Run ldapsearch under the test base DN; return merged stdout/stderr."""
    cmd = [ldapsearch, '-H', ldap_uri, '-b', top_dn,
           '-D', admin_dn, '-w', admin_pw, args]
    searcher = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
    result, _ = searcher.communicate()
    return result
def ldap_modify(ldif, args=()):
    """Feed ldif text to ldapmodify as the test admin and log its output.

    args is an optional sequence of extra ldapmodify flags (e.g. ['-a']).
    The default is an immutable tuple to avoid the shared-mutable-default
    pitfall; list(args) keeps list concatenation working for any sequence.
    """
    cmd = [ldapmodify, '-H', ldap_uri, '-D', admin_dn,
           '-x', '-w', admin_pw] + list(args)
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    (out, dummy) = proc.communicate(ldif)
    output(out)
def ldap_add(dn, objectclass, attrs=()):
    """Add an LDAP entry with the given DN, objectclass, and attributes.

    attrs is a sequence of raw 'name: value' LDIF lines. The default is
    an immutable tuple rather than a mutable list (shared-default
    pitfall); '\n'.join accepts any sequence, so the generated LDIF is
    byte-identical to before.
    """
    in_data = 'dn: %s\nobjectclass: %s\n' % (dn, objectclass)
    in_data += '\n'.join(attrs) + '\n'
    # -a: perform an LDIF add rather than a modify.
    ldap_modify(in_data, ['-a'])
# Create krbContainer objects for use as subtrees.
ldap_add('cn=t1,cn=krb5', 'krbContainer')
ldap_add('cn=t2,cn=krb5', 'krbContainer')
ldap_add('cn=x,cn=t1,cn=krb5', 'krbContainer')
ldap_add('cn=y,cn=t2,cn=krb5', 'krbContainer')
# Create a realm, exercising all of the realm options.
kldaputil(['create', '-s', '-P', 'master', '-subtrees', 'cn=t2,cn=krb5',
'-containerref', 'cn=t2,cn=krb5', '-sscope', 'one',
'-maxtktlife', '5min', '-maxrenewlife', '10min', '-allow_svr'])
# Modify the realm, exercising overlapping subtree pruning.
kldaputil(['modify', '-subtrees',
'cn=x,cn=t1,cn=krb5:cn=t1,cn=krb5:cn=t2,cn=krb5:cn=y,cn=t2,cn=krb5',
'-containerref', 'cn=t1,cn=krb5', '-sscope', 'sub',
'-maxtktlife', '5hour', '-maxrenewlife', '10hour', '+allow_svr'])
out = kldaputil(['list'])
if out != 'KRBTEST.COM\n':
fail('Unexpected kdb5_ldap_util list output')
# Create a principal at a specified DN. This is a little dodgy
# because we're sticking a krbPrincipalAux objectclass onto a subtree
# krbContainer, but it works and it avoids having to load core.schema
# in the test LDAP server.
realm.run([kadminl, 'ank', '-randkey', '-x', 'dn=cn=krb5', 'princ1'],
expected_code=1, expected_msg='DN is out of the realm subtree')
# Check that the DN container check is a hierarchy test, not a simple
# suffix match (CVE-2018-5730). We expect this operation to fail
# either way (because "xcn" isn't a valid DN tag) but the container
# check should happen before the DN is parsed.
realm.run([kadminl, 'ank', '-randkey', '-x', 'dn=xcn=t1,cn=krb5', 'princ1'],
expected_code=1, expected_msg='DN is out of the realm subtree')
realm.run([kadminl, 'ank', '-randkey', '-x', 'dn=cn=t2,cn=krb5', 'princ1'])
realm.run([kadminl, 'getprinc', 'princ1'], expected_msg='Principal: princ1')
realm.run([kadminl, 'ank', '-randkey', '-x', 'dn=cn=t2,cn=krb5', 'again'],
expected_code=1, expected_msg='ldap object is already kerberized')
# Check that we can't set linkdn on a non-standalone object.
realm.run([kadminl, 'modprinc', '-x', 'linkdn=cn=t1,cn=krb5', 'princ1'],
expected_code=1, expected_msg='link information can not be set')
# Create a principal with a specified linkdn.
realm.run([kadminl, 'ank', '-randkey', '-x', 'linkdn=cn=krb5', 'princ2'],
expected_code=1, expected_msg='DN is out of the realm subtree')
realm.run([kadminl, 'ank', '-randkey', '-x', 'linkdn=cn=t1,cn=krb5', 'princ2'])
# Check that we can't reset linkdn.
realm.run([kadminl, 'modprinc', '-x', 'linkdn=cn=t2,cn=krb5', 'princ2'],
expected_code=1, expected_msg='kerberos principal is already linked')
# Create a principal with a specified containerdn.
realm.run([kadminl, 'ank', '-randkey', '-x', 'containerdn=cn=krb5', 'princ3'],
expected_code=1, expected_msg='DN is out of the realm subtree')
realm.run([kadminl, 'ank', '-randkey', '-x', 'containerdn=cn=t1,cn=krb5',
'princ3'])
realm.run([kadminl, 'modprinc', '-x', 'containerdn=cn=t2,cn=krb5', 'princ3'],
expected_code=1, expected_msg='containerdn option not supported')
# Verify that containerdn is checked when linkdn is also supplied
# (CVE-2018-5730).
realm.run([kadminl, 'ank', '-randkey', '-x', 'containerdn=cn=krb5',
'-x', 'linkdn=cn=t2,cn=krb5', 'princ4'], expected_code=1,
expected_msg='DN is out of the realm subtree')
# Create and modify a ticket policy.
kldaputil(['create_policy', '-maxtktlife', '3hour', '-maxrenewlife', '6hour',
'-allow_forwardable', 'tktpol'])
kldaputil(['modify_policy', '-maxtktlife', '4hour', '-maxrenewlife', '8hour',
'+requires_preauth', 'tktpol'])
out = kldaputil(['view_policy', 'tktpol'])
if ('Ticket policy: tktpol\n' not in out or
'Maximum ticket life: 0 days 04:00:00\n' not in out or
'Maximum renewable life: 0 days 08:00:00\n' not in out or
'Ticket flags: DISALLOW_FORWARDABLE REQUIRES_PRE_AUTH' not in out):
fail('Unexpected kdb5_ldap_util view_policy output')
out = kldaputil(['list_policy'])
if out != 'tktpol\n':
fail('Unexpected kdb5_ldap_util list_policy output')
# Associate the ticket policy to a principal.
realm.run([kadminl, 'ank', '-randkey', '-x', 'tktpolicy=tktpol', 'princ4'])
out = realm.run([kadminl, 'getprinc', 'princ4'])
if ('Maximum ticket life: 0 days 04:00:00\n' not in out or
'Maximum renewable life: 0 days 08:00:00\n' not in out or
'Attributes: DISALLOW_FORWARDABLE REQUIRES_PRE_AUTH\n' not in out):
fail('Unexpected getprinc output with ticket policy')
# Destroying the policy should fail while a principal references it.
kldaputil(['destroy_policy', '-force', 'tktpol'], expected_code=1)
# Dissociate the ticket policy from the principal.
realm.run([kadminl, 'modprinc', '-x', 'tktpolicy=', 'princ4'])
out = realm.run([kadminl, 'getprinc', 'princ4'])
if ('Maximum ticket life: 0 days 05:00:00\n' not in out or
'Maximum renewable life: 0 days 10:00:00\n' not in out or
'Attributes:\n' not in out):
fail('Unexpected getprinc output without ticket policy')
# Destroy the ticket policy.
kldaputil(['destroy_policy', '-force', 'tktpol'])
kldaputil(['view_policy', 'tktpol'], expected_code=1)
out = kldaputil(['list_policy'])
if out:
fail('Unexpected kdb5_ldap_util list_policy output after destroy')
# Create another ticket policy to be destroyed with the realm.
kldaputil(['create_policy', 'tktpol2'])
# Try to create a password policy conflicting with a ticket policy.
realm.run([kadminl, 'addpol', 'tktpol2'], expected_code=1,
expected_msg='Already exists while creating policy "tktpol2"')
# Try to create a ticket policy conflicting with a password policy.
realm.run([kadminl, 'addpol', 'pwpol'])
out = kldaputil(['create_policy', 'pwpol'], expected_code=1)
if 'Already exists while creating policy object' not in out:
fail('Expected error not seen in kdb5_ldap_util output')
# Try to use a password policy as a ticket policy.
realm.run([kadminl, 'modprinc', '-x', 'tktpolicy=pwpol', 'princ4'],
expected_code=1, expected_msg='Object class violation')
# Use a ticket policy as a password policy (CVE-2014-5353). This
# works with a warning; use kadmin.local -q so the warning is shown.
realm.run([kadminl, '-q', 'modprinc -policy tktpol2 princ4'],
expected_msg='WARNING: policy "tktpol2" does not exist')
# Do some basic tests with a KDC against the LDAP module, exercising the
# db_args processing code.
realm.start_kdc(['-x', 'nconns=3', '-x', 'host=' + ldap_uri,
'-x', 'binddn=' + admin_dn, '-x', 'bindpwd=' + admin_pw])
realm.addprinc(realm.user_princ, password('user'))
realm.addprinc(realm.host_princ)
realm.extract_keytab(realm.host_princ, realm.keytab)
realm.kinit(realm.user_princ, password('user'))
realm.run([kvno, realm.host_princ])
realm.klist(realm.user_princ, realm.host_princ)
# Test auth indicator support
realm.addprinc('authind', password('authind'))
realm.run([kadminl, 'setstr', 'authind', 'require_auth', 'otp radius'])
out = ldap_search('(krbPrincipalName=authind*)')
if 'krbPrincipalAuthInd: otp' not in out:
fail('Expected krbPrincipalAuthInd value not in output')
if 'krbPrincipalAuthInd: radius' not in out:
fail('Expected krbPrincipalAuthInd value not in output')
realm.run([kadminl, 'getstrs', 'authind'],
expected_msg='require_auth: otp radius')
# Test service principal aliases.
realm.addprinc('canon', password('canon'))
ldap_modify('dn: krbPrincipalName=canon@KRBTEST.COM,cn=t1,cn=krb5\n'
'changetype: modify\n'
'add: krbPrincipalName\n'
'krbPrincipalName: alias@KRBTEST.COM\n'
'-\n'
'add: krbCanonicalName\n'
'krbCanonicalName: canon@KRBTEST.COM\n')
realm.run([kadminl, 'getprinc', 'alias'],
expected_msg='Principal: canon@KRBTEST.COM\n')
realm.run([kadminl, 'getprinc', 'canon'],
expected_msg='Principal: canon@KRBTEST.COM\n')
realm.run([kvno, 'alias', 'canon'])
out = realm.run([klist])
if 'alias@KRBTEST.COM\n' not in out or 'canon@KRBTEST.COM' not in out:
fail('After fetching alias and canon, klist is missing one or both')
realm.kinit(realm.user_princ, password('user'), ['-S', 'alias'])
realm.klist(realm.user_princ, 'alias@KRBTEST.COM')
# Make sure an alias to the local TGS is still treated like an alias.
ldap_modify('dn: krbPrincipalName=krbtgt/KRBTEST.COM@KRBTEST.COM,'
'cn=KRBTEST.COM,cn=krb5\n'
'changetype: modify\n'
'add:krbPrincipalName\n'
'krbPrincipalName: tgtalias@KRBTEST.COM\n'
'-\n'
'add: krbCanonicalName\n'
'krbCanonicalName: krbtgt/KRBTEST.COM@KRBTEST.COM\n')
realm.run([kadminl, 'getprinc', 'tgtalias'],
expected_msg='Principal: krbtgt/KRBTEST.COM@KRBTEST.COM')
realm.kinit(realm.user_princ, password('user'))
realm.run([kvno, 'tgtalias'])
realm.klist(realm.user_princ, 'tgtalias@KRBTEST.COM')
# Make sure aliases work in header tickets.
realm.run([kadminl, 'modprinc', '-maxrenewlife', '3 hours', 'user'])
realm.run([kadminl, 'modprinc', '-maxrenewlife', '3 hours',
'krbtgt/KRBTEST.COM'])
realm.kinit(realm.user_princ, password('user'), ['-l', '1h', '-r', '2h'])
realm.run([kvno, 'alias'])
realm.kinit(realm.user_princ, flags=['-R', '-S', 'alias'])
realm.klist(realm.user_princ, 'alias@KRBTEST.COM')
# Test client principal aliases, with and without preauth.
realm.kinit('canon', password('canon'))
realm.kinit('alias', password('canon'), expected_code=1,
expected_msg='not found in Kerberos database')
realm.kinit('alias', password('canon'), ['-C'])
realm.run([kvno, 'alias'])
realm.klist('canon@KRBTEST.COM', 'alias@KRBTEST.COM')
realm.run([kadminl, 'modprinc', '+requires_preauth', 'canon'])
realm.kinit('canon', password('canon'))
realm.kinit('alias', password('canon'), ['-C'])
# Test password history.
def test_pwhist(nhist):
    """Exercise a password history policy of size nhist.

    Creates a fresh policy and principal, cycles through nhist distinct
    passwords verifying every prior password is rejected, then confirms
    the oldest entry rotates out once the history is full.
    """
    princ = 'pwhistprinc' + str(nhist)
    pol = 'pwhistpol' + str(nhist)

    def cpw(n, **kwargs):
        # Change the principal's password to the decimal string for n.
        realm.run([kadminl, 'cpw', '-pw', str(n), princ], **kwargs)

    def cpw_fail(n):
        # A password still present in the history must be refused.
        cpw(n, expected_code=1)

    output('*** Testing password history of size %d\n' % nhist)
    realm.run([kadminl, 'addpol', '-history', str(nhist), pol])
    realm.run([kadminl, 'addprinc', '-policy', pol, '-nokey', princ])
    for idx in range(nhist):
        # Set a password, then check that all previous passwords fail.
        cpw(idx)
        for prev in range(idx + 1):
            cpw_fail(prev)
    # Set one more new password, and make sure the oldest key is
    # rotated out.
    cpw(nhist)
    cpw_fail(1)
    cpw(0)
for n in (1, 2, 3, 4, 5):
test_pwhist(n)
# Regression test for #8193: test password character class requirements.
princ = 'charclassprinc'
pol = 'charclasspol'
realm.run([kadminl, 'addpol', '-minclasses', '3', pol])
realm.run([kadminl, 'addprinc', '-policy', pol, '-nokey', princ])
realm.run([kadminl, 'cpw', '-pw', 'abcdef', princ], expected_code=1)
realm.run([kadminl, 'cpw', '-pw', 'Abcdef', princ], expected_code=1)
realm.run([kadminl, 'cpw', '-pw', 'Abcdef1', princ])
# Test principal renaming and make sure last modified is changed
def get_princ(princ):
    """Return kadmin.local getprinc output for princ as a field->value dict.

    Each output line is split on the first ':'; both halves are stripped.
    """
    text = realm.run([kadminl, 'getprinc', princ])
    fields = {}
    for line in text.splitlines():
        name, value = line.split(":", 1)
        fields[name.strip()] = value.strip()
    return fields
realm.addprinc("rename", password('rename'))
renameprinc = get_princ("rename")
realm.run([kadminl, '-p', 'fake@KRBTEST.COM', 'renprinc', 'rename', 'renamed'])
renamedprinc = get_princ("renamed")
if renameprinc['Last modified'] == renamedprinc['Last modified']:
fail('Last modified data not updated when principal was renamed')
# Regression test for #7980 (fencepost when dividing keys up by kvno).
realm.run([kadminl, 'addprinc', '-randkey', '-e', 'aes256-cts,aes128-cts',
'kvnoprinc'])
realm.run([kadminl, 'cpw', '-randkey', '-keepold', '-e',
'aes256-cts,aes128-cts', 'kvnoprinc'])
realm.run([kadminl, 'getprinc', 'kvnoprinc'], expected_msg='Number of keys: 4')
realm.run([kadminl, 'cpw', '-randkey', '-keepold', '-e',
'aes256-cts,aes128-cts', 'kvnoprinc'])
realm.run([kadminl, 'getprinc', 'kvnoprinc'], expected_msg='Number of keys: 6')
# Regression test for #8041 (NULL dereference on keyless principals).
realm.run([kadminl, 'addprinc', '-nokey', 'keylessprinc'])
realm.run([kadminl, 'getprinc', 'keylessprinc'],
expected_msg='Number of keys: 0')
realm.run([kadminl, 'cpw', '-randkey', '-e', 'aes256-cts,aes128-cts',
'keylessprinc'])
realm.run([kadminl, 'cpw', '-randkey', '-keepold', '-e',
'aes256-cts,aes128-cts', 'keylessprinc'])
realm.run([kadminl, 'getprinc', 'keylessprinc'],
expected_msg='Number of keys: 4')
realm.run([kadminl, 'purgekeys', '-all', 'keylessprinc'])
realm.run([kadminl, 'getprinc', 'keylessprinc'],
expected_msg='Number of keys: 0')
# Test for 8354 (old password history entries when -keepold is used)
realm.run([kadminl, 'addpol', '-history', '2', 'keepoldpasspol'])
realm.run([kadminl, 'addprinc', '-policy', 'keepoldpasspol', '-pw', 'aaaa',
'keepoldpassprinc'])
for p in ('bbbb', 'cccc', 'aaaa'):
realm.run([kadminl, 'cpw', '-keepold', '-pw', p, 'keepoldpassprinc'])
if runenv.sizeof_time_t <= 4:
skipped('y2038 LDAP test', 'platform has 32-bit time_t')
else:
# Test storage of timestamps after y2038.
realm.run([kadminl, 'modprinc', '-pwexpire', '2040-02-03', 'user'])
realm.run([kadminl, 'getprinc', 'user'], expected_msg=' 2040\n')
realm.stop()
# Briefly test dump and load.
dumpfile = os.path.join(realm.testdir, 'dump')
realm.run([kdb5_util, 'dump', dumpfile])
realm.run([kdb5_util, 'load', dumpfile], expected_code=1,
expected_msg='KDB module requires -update argument')
realm.run([kdb5_util, 'load', '-update', dumpfile])
# Destroy the realm.
kldaputil(['destroy', '-f'])
out = kldaputil(['list'])
if out:
fail('Unexpected kdb5_ldap_util list output after destroy')
if not core_schema:
skip_rest('LDAP SASL tests', 'core schema not found')
if runenv.have_sasl != 'yes':
skip_rest('LDAP SASL tests', 'SASL support not built')
# Test SASL EXTERNAL auth. Remove the DNs and service password file
# from the DB module config.
os.remove(ldap_pwfile)
dbmod = conf['dbmodules']['ldap']
dbmod['ldap_kdc_sasl_mech'] = dbmod['ldap_kadmind_sasl_mech'] = 'EXTERNAL'
del dbmod['ldap_service_password_file']
del dbmod['ldap_kdc_dn'], dbmod['ldap_kadmind_dn']
realm = K5Realm(create_kdb=False, kdc_conf=conf)
realm.run([kdb5_ldap_util, 'create', '-s', '-P', 'master'])
realm.start_kdc()
realm.addprinc(realm.user_princ, password('user'))
realm.kinit(realm.user_princ, password('user'))
realm.stop()
realm.run([kdb5_ldap_util, 'destroy', '-f'])
# Test SASL DIGEST-MD5 auth. We need to set a clear-text password for
# the admin DN, so create a person entry (requires the core schema).
# Restore the service password file in the config and set authcids.
ldap_add('cn=admin,cn=krb5', 'person',
['sn: dummy', 'userPassword: admin'])
dbmod['ldap_kdc_sasl_mech'] = dbmod['ldap_kadmind_sasl_mech'] = 'DIGEST-MD5'
dbmod['ldap_kdc_sasl_authcid'] = 'digestuser'
dbmod['ldap_kadmind_sasl_authcid'] = 'digestuser'
dbmod['ldap_service_password_file'] = ldap_pwfile
realm = K5Realm(create_kdb=False, kdc_conf=conf)
input = admin_pw + '\n' + admin_pw + '\n'
realm.run([kdb5_ldap_util, 'stashsrvpw', 'digestuser'], input=input)
realm.run([kdb5_ldap_util, 'create', '-s', '-P', 'master'])
realm.start_kdc()
realm.addprinc(realm.user_princ, password('user'))
realm.kinit(realm.user_princ, password('user'))
realm.stop()
# Exercise DB options, which should cause binding to fail.
realm.run([kadminl, '-x', 'sasl_authcid=ab', 'getprinc', 'user'],
expected_code=1, expected_msg='Cannot bind to LDAP server')
realm.run([kadminl, '-x', 'bindpwd=wrong', 'getprinc', 'user'],
expected_code=1, expected_msg='Cannot bind to LDAP server')
realm.run([kdb5_ldap_util, 'destroy', '-f'])
# We could still use tests to exercise:
# * DB arg handling in krb5_ldap_create
# * krbAllowedToDelegateTo attribute processing
# * A load operation overwriting a standalone principal entry which
# already exists but doesn't have a krbPrincipalName attribute
# matching the principal name.
# * A bunch of invalid-input error conditions
#
# There is no coverage for the following because it would be difficult:
# * Out-of-memory error conditions
# * Handling of failures from slapd (including krb5_retry_get_ldap_handle)
# * Handling of servers which don't support mod-increment
# * krb5_ldap_delete_krbcontainer (only happens if krb5_ldap_create fails)
success('LDAP and DB2 KDB tests')
| ./CrossVul/dataset_final_sorted/CWE-476/py/good_576_3 |
crossvul-python_data_bad_4997_1 | #!/usr/bin/python
from k5test import *
# Skip this test if pkinit wasn't built.
if not os.path.exists(os.path.join(plugins, 'preauth', 'pkinit.so')):
skip_rest('PKINIT tests', 'PKINIT module not built')
# Check if soft-pkcs11.so is available.
try:
    import ctypes
    lib = ctypes.LibraryLoader(ctypes.CDLL).LoadLibrary('soft-pkcs11.so')
    del lib
    have_soft_pkcs11 = True
except Exception:
    # A bare "except:" would also swallow KeyboardInterrupt/SystemExit.
    # LoadLibrary raises OSError when the module is absent; catching
    # Exception keeps the probe permissive while letting interrupts
    # propagate.
    have_soft_pkcs11 = False
# Construct a krb5.conf fragment configuring pkinit.
certs = os.path.join(srctop, 'tests', 'dejagnu', 'pkinit-certs')
ca_pem = os.path.join(certs, 'ca.pem')
kdc_pem = os.path.join(certs, 'kdc.pem')
user_pem = os.path.join(certs, 'user.pem')
privkey_pem = os.path.join(certs, 'privkey.pem')
privkey_enc_pem = os.path.join(certs, 'privkey-enc.pem')
user_p12 = os.path.join(certs, 'user.p12')
user_enc_p12 = os.path.join(certs, 'user-enc.p12')
path = os.path.join(os.getcwd(), 'testdir', 'tmp-pkinit-certs')
path_enc = os.path.join(os.getcwd(), 'testdir', 'tmp-pkinit-certs-enc')
pkinit_krb5_conf = {'realms': {'$realm': {
'pkinit_anchors': 'FILE:%s' % ca_pem}}}
pkinit_kdc_conf = {'realms': {'$realm': {
'default_principal_flags': '+preauth',
'pkinit_eku_checking': 'none',
'pkinit_identity': 'FILE:%s,%s' % (kdc_pem, privkey_pem),
'pkinit_indicator': ['indpkinit1', 'indpkinit2']}}}
restrictive_kdc_conf = {'realms': {'$realm': {
'restrict_anonymous_to_tgt': 'true' }}}
file_identity = 'FILE:%s,%s' % (user_pem, privkey_pem)
file_enc_identity = 'FILE:%s,%s' % (user_pem, privkey_enc_pem)
dir_identity = 'DIR:%s' % path
dir_enc_identity = 'DIR:%s' % path_enc
dir_file_identity = 'FILE:%s,%s' % (os.path.join(path, 'user.crt'),
os.path.join(path, 'user.key'))
dir_file_enc_identity = 'FILE:%s,%s' % (os.path.join(path_enc, 'user.crt'),
os.path.join(path_enc, 'user.key'))
p12_identity = 'PKCS12:%s' % user_p12
p12_enc_identity = 'PKCS12:%s' % user_enc_p12
p11_identity = 'PKCS11:soft-pkcs11.so'
p11_token_identity = ('PKCS11:module_name=soft-pkcs11.so:'
'slotid=1:token=SoftToken (token)')
realm = K5Realm(krb5_conf=pkinit_krb5_conf, kdc_conf=pkinit_kdc_conf,
get_creds=False)
# Sanity check - password-based preauth should still work.
realm.run(['./responder', '-r', 'password=%s' % password('user'),
realm.user_princ])
realm.kinit(realm.user_princ, password=password('user'))
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# Test anonymous PKINIT.
out = realm.kinit('@%s' % realm.realm, flags=['-n'], expected_code=1)
if 'not found in Kerberos database' not in out:
fail('Wrong error for anonymous PKINIT without anonymous enabled')
realm.addprinc('WELLKNOWN/ANONYMOUS')
realm.kinit('@%s' % realm.realm, flags=['-n'])
realm.klist('WELLKNOWN/ANONYMOUS@WELLKNOWN:ANONYMOUS')
realm.run([kvno, realm.host_princ])
out = realm.run(['./adata', realm.host_princ])
if '97:' in out:
fail('auth indicators seen in anonymous PKINIT ticket')
# Test anonymous kadmin.
f = open(os.path.join(realm.testdir, 'acl'), 'a')
f.write('WELLKNOWN/ANONYMOUS@WELLKNOWN:ANONYMOUS a *')
f.close()
realm.start_kadmind()
realm.run([kadmin, '-n', 'addprinc', '-pw', 'test', 'testadd'])
out = realm.run([kadmin, '-n', 'getprinc', 'testadd'], expected_code=1)
if "Operation requires ``get'' privilege" not in out:
fail('Anonymous kadmin has too much privilege')
realm.stop_kadmind()
# Test with anonymous restricted; FAST should work but kvno should fail.
r_env = realm.special_env('restrict', True, kdc_conf=restrictive_kdc_conf)
realm.stop_kdc()
realm.start_kdc(env=r_env)
realm.kinit('@%s' % realm.realm, flags=['-n'])
realm.kinit('@%s' % realm.realm, flags=['-n', '-T', realm.ccache])
out = realm.run([kvno, realm.host_princ], expected_code=1)
if 'KDC policy rejects request' not in out:
fail('Wrong error for restricted anonymous PKINIT')
# Go back to a normal KDC and disable anonymous PKINIT.
realm.stop_kdc()
realm.start_kdc()
realm.run([kadminl, 'delprinc', 'WELLKNOWN/ANONYMOUS'])
# Run the basic test - PKINIT with FILE: identity, with no password on the key.
realm.run(['./responder', '-x', 'pkinit=',
'-X', 'X509_user_identity=%s' % file_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# Run the basic test - PKINIT with FILE: identity, with a password on the key,
# supplied by the prompter.
# Expect failure if the responder does nothing, and we have no prompter.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % file_enc_identity,
'-X', 'X509_user_identity=%s' % file_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_enc_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
out = realm.run(['./adata', realm.host_princ])
if '+97: [indpkinit1, indpkinit2]' not in out:
fail('auth indicators not seen in PKINIT ticket')
# Run the basic test - PKINIT with FILE: identity, with a password on the key,
# supplied by the responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % file_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % file_enc_identity,
'-X', 'X509_user_identity=%s' % file_enc_identity,
realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % file_enc_identity,
'-p', '%s=%s' % (file_enc_identity, 'encrypted'), realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with no password on the key.
os.mkdir(path)
os.mkdir(path_enc)
shutil.copy(privkey_pem, os.path.join(path, 'user.key'))
shutil.copy(privkey_enc_pem, os.path.join(path_enc, 'user.key'))
shutil.copy(user_pem, os.path.join(path, 'user.crt'))
shutil.copy(user_pem, os.path.join(path_enc, 'user.crt'))
realm.run(['./responder', '-x', 'pkinit=', '-X',
'X509_user_identity=%s' % dir_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % dir_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with a password on the key, supplied by the
# prompter.
# Expect failure if the responder does nothing, and we have no prompter.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % dir_file_enc_identity,
'-X', 'X509_user_identity=%s' % dir_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % dir_enc_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with a password on the key, supplied by the
# responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % dir_file_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % dir_file_enc_identity,
'-X', 'X509_user_identity=%s' % dir_enc_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % dir_enc_identity,
'-p', '%s=%s' % (dir_file_enc_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with no password on the bundle.
realm.run(['./responder', '-x', 'pkinit=',
'-X', 'X509_user_identity=%s' % p12_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with a password on the bundle, supplied by the
# prompter.
# Expect failure if the responder does nothing, and we have no prompter.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p12_enc_identity,
'-X', 'X509_user_identity=%s' % p12_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_enc_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with a password on the bundle, supplied by the
# responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p12_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % p12_enc_identity,
'-X', 'X509_user_identity=%s' % p12_enc_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % p12_enc_identity,
'-p', '%s=%s' % (p12_enc_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
if not have_soft_pkcs11:
skip_rest('PKINIT PKCS11 tests', 'soft-pkcs11.so not found')
softpkcs11rc = os.path.join(os.getcwd(), 'testdir', 'soft-pkcs11.rc')
realm.env['SOFTPKCS11RC'] = softpkcs11rc
# PKINIT with PKCS11: identity, with no need for a PIN.
conf = open(softpkcs11rc, 'w')
conf.write("%s\t%s\t%s\t%s\n" % ('user', 'user token', user_pem, privkey_pem))
conf.close()
# Expect to succeed without having to supply any more information.
realm.run(['./responder', '-x', 'pkinit=',
'-X', 'X509_user_identity=%s' % p11_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p11_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS11: identity, with a PIN supplied by the prompter.
os.remove(softpkcs11rc)
conf = open(softpkcs11rc, 'w')
conf.write("%s\t%s\t%s\t%s\n" % ('user', 'user token', user_pem,
privkey_enc_pem))
conf.close()
# Expect failure if the responder does nothing, and there's no prompter
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p11_token_identity,
'-X', 'X509_user_identity=%s' % p11_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p11_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS11: identity, with a PIN supplied by the responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p11_token_identity,
'-r', 'pkinit={"%s": "encrypted"}' % p11_token_identity,
'-X', 'X509_user_identity=%s' % p11_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % p11_identity,
'-p', '%s=%s' % (p11_token_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
success('PKINIT tests')
| ./CrossVul/dataset_final_sorted/CWE-476/py/bad_4997_1 |
crossvul-python_data_good_4997_1 | #!/usr/bin/python
from k5test import *
# Skip this test if pkinit wasn't built.
if not os.path.exists(os.path.join(plugins, 'preauth', 'pkinit.so')):
skip_rest('PKINIT tests', 'PKINIT module not built')
# Check if soft-pkcs11.so is available.
try:
import ctypes
lib = ctypes.LibraryLoader(ctypes.CDLL).LoadLibrary('soft-pkcs11.so')
del lib
have_soft_pkcs11 = True
except:
have_soft_pkcs11 = False
# Construct a krb5.conf fragment configuring pkinit.
certs = os.path.join(srctop, 'tests', 'dejagnu', 'pkinit-certs')
ca_pem = os.path.join(certs, 'ca.pem')
kdc_pem = os.path.join(certs, 'kdc.pem')
user_pem = os.path.join(certs, 'user.pem')
privkey_pem = os.path.join(certs, 'privkey.pem')
privkey_enc_pem = os.path.join(certs, 'privkey-enc.pem')
user_p12 = os.path.join(certs, 'user.p12')
user_enc_p12 = os.path.join(certs, 'user-enc.p12')
path = os.path.join(os.getcwd(), 'testdir', 'tmp-pkinit-certs')
path_enc = os.path.join(os.getcwd(), 'testdir', 'tmp-pkinit-certs-enc')
pkinit_krb5_conf = {'realms': {'$realm': {
'pkinit_anchors': 'FILE:%s' % ca_pem}}}
pkinit_kdc_conf = {'realms': {'$realm': {
'default_principal_flags': '+preauth',
'pkinit_eku_checking': 'none',
'pkinit_identity': 'FILE:%s,%s' % (kdc_pem, privkey_pem),
'pkinit_indicator': ['indpkinit1', 'indpkinit2']}}}
restrictive_kdc_conf = {'realms': {'$realm': {
'restrict_anonymous_to_tgt': 'true' }}}
file_identity = 'FILE:%s,%s' % (user_pem, privkey_pem)
file_enc_identity = 'FILE:%s,%s' % (user_pem, privkey_enc_pem)
dir_identity = 'DIR:%s' % path
dir_enc_identity = 'DIR:%s' % path_enc
dir_file_identity = 'FILE:%s,%s' % (os.path.join(path, 'user.crt'),
os.path.join(path, 'user.key'))
dir_file_enc_identity = 'FILE:%s,%s' % (os.path.join(path_enc, 'user.crt'),
os.path.join(path_enc, 'user.key'))
p12_identity = 'PKCS12:%s' % user_p12
p12_enc_identity = 'PKCS12:%s' % user_enc_p12
p11_identity = 'PKCS11:soft-pkcs11.so'
p11_token_identity = ('PKCS11:module_name=soft-pkcs11.so:'
'slotid=1:token=SoftToken (token)')
realm = K5Realm(krb5_conf=pkinit_krb5_conf, kdc_conf=pkinit_kdc_conf,
get_creds=False)
# Sanity check - password-based preauth should still work.
realm.run(['./responder', '-r', 'password=%s' % password('user'),
realm.user_princ])
realm.kinit(realm.user_princ, password=password('user'))
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# Test anonymous PKINIT.
out = realm.kinit('@%s' % realm.realm, flags=['-n'], expected_code=1)
if 'not found in Kerberos database' not in out:
fail('Wrong error for anonymous PKINIT without anonymous enabled')
realm.addprinc('WELLKNOWN/ANONYMOUS')
realm.kinit('@%s' % realm.realm, flags=['-n'])
realm.klist('WELLKNOWN/ANONYMOUS@WELLKNOWN:ANONYMOUS')
realm.run([kvno, realm.host_princ])
out = realm.run(['./adata', realm.host_princ])
if '97:' in out:
fail('auth indicators seen in anonymous PKINIT ticket')
# Test anonymous kadmin.
f = open(os.path.join(realm.testdir, 'acl'), 'a')
f.write('WELLKNOWN/ANONYMOUS@WELLKNOWN:ANONYMOUS a *')
f.close()
realm.start_kadmind()
realm.run([kadmin, '-n', 'addprinc', '-pw', 'test', 'testadd'])
out = realm.run([kadmin, '-n', 'getprinc', 'testadd'], expected_code=1)
if "Operation requires ``get'' privilege" not in out:
fail('Anonymous kadmin has too much privilege')
realm.stop_kadmind()
# Test with anonymous restricted; FAST should work but kvno should fail.
r_env = realm.special_env('restrict', True, kdc_conf=restrictive_kdc_conf)
realm.stop_kdc()
realm.start_kdc(env=r_env)
realm.kinit('@%s' % realm.realm, flags=['-n'])
realm.kinit('@%s' % realm.realm, flags=['-n', '-T', realm.ccache])
out = realm.run([kvno, realm.host_princ], expected_code=1)
if 'KDC policy rejects request' not in out:
fail('Wrong error for restricted anonymous PKINIT')
# Regression test for #8458: S4U2Self requests crash the KDC if
# anonymous is restricted.
realm.kinit(realm.host_princ, flags=['-k'])
realm.run([kvno, '-U', 'user', realm.host_princ])
# Go back to a normal KDC and disable anonymous PKINIT.
realm.stop_kdc()
realm.start_kdc()
realm.run([kadminl, 'delprinc', 'WELLKNOWN/ANONYMOUS'])
# Run the basic test - PKINIT with FILE: identity, with no password on the key.
realm.run(['./responder', '-x', 'pkinit=',
'-X', 'X509_user_identity=%s' % file_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# Run the basic test - PKINIT with FILE: identity, with a password on the key,
# supplied by the prompter.
# Expect failure if the responder does nothing, and we have no prompter.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % file_enc_identity,
'-X', 'X509_user_identity=%s' % file_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_enc_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
out = realm.run(['./adata', realm.host_princ])
if '+97: [indpkinit1, indpkinit2]' not in out:
fail('auth indicators not seen in PKINIT ticket')
# Run the basic test - PKINIT with FILE: identity, with a password on the key,
# supplied by the responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % file_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % file_enc_identity,
'-X', 'X509_user_identity=%s' % file_enc_identity,
realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % file_enc_identity,
'-p', '%s=%s' % (file_enc_identity, 'encrypted'), realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with no password on the key.
os.mkdir(path)
os.mkdir(path_enc)
shutil.copy(privkey_pem, os.path.join(path, 'user.key'))
shutil.copy(privkey_enc_pem, os.path.join(path_enc, 'user.key'))
shutil.copy(user_pem, os.path.join(path, 'user.crt'))
shutil.copy(user_pem, os.path.join(path_enc, 'user.crt'))
realm.run(['./responder', '-x', 'pkinit=', '-X',
'X509_user_identity=%s' % dir_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % dir_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with a password on the key, supplied by the
# prompter.
# Expect failure if the responder does nothing, and we have no prompter.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % dir_file_enc_identity,
'-X', 'X509_user_identity=%s' % dir_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % dir_enc_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with a password on the key, supplied by the
# responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % dir_file_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % dir_file_enc_identity,
'-X', 'X509_user_identity=%s' % dir_enc_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % dir_enc_identity,
'-p', '%s=%s' % (dir_file_enc_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with no password on the bundle.
realm.run(['./responder', '-x', 'pkinit=',
'-X', 'X509_user_identity=%s' % p12_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with a password on the bundle, supplied by the
# prompter.
# Expect failure if the responder does nothing, and we have no prompter.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p12_enc_identity,
'-X', 'X509_user_identity=%s' % p12_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_enc_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with a password on the bundle, supplied by the
# responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p12_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % p12_enc_identity,
'-X', 'X509_user_identity=%s' % p12_enc_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % p12_enc_identity,
'-p', '%s=%s' % (p12_enc_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
if not have_soft_pkcs11:
skip_rest('PKINIT PKCS11 tests', 'soft-pkcs11.so not found')
softpkcs11rc = os.path.join(os.getcwd(), 'testdir', 'soft-pkcs11.rc')
realm.env['SOFTPKCS11RC'] = softpkcs11rc
# PKINIT with PKCS11: identity, with no need for a PIN.
conf = open(softpkcs11rc, 'w')
conf.write("%s\t%s\t%s\t%s\n" % ('user', 'user token', user_pem, privkey_pem))
conf.close()
# Expect to succeed without having to supply any more information.
realm.run(['./responder', '-x', 'pkinit=',
'-X', 'X509_user_identity=%s' % p11_identity, realm.user_princ])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p11_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS11: identity, with a PIN supplied by the prompter.
os.remove(softpkcs11rc)
conf = open(softpkcs11rc, 'w')
conf.write("%s\t%s\t%s\t%s\n" % ('user', 'user token', user_pem,
privkey_enc_pem))
conf.close()
# Expect failure if the responder does nothing, and there's no prompter
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p11_token_identity,
'-X', 'X509_user_identity=%s' % p11_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p11_identity],
password='encrypted')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS11: identity, with a PIN supplied by the responder.
# Supply the response in raw form.
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p11_token_identity,
'-r', 'pkinit={"%s": "encrypted"}' % p11_token_identity,
'-X', 'X509_user_identity=%s' % p11_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % p11_identity,
'-p', '%s=%s' % (p11_token_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
success('PKINIT tests')
| ./CrossVul/dataset_final_sorted/CWE-476/py/good_4997_1 |
crossvul-python_data_good_5244_0 |
import base64
import hashlib
import hmac
import struct
import six
import sys
import Crypto.Hash.SHA256
import Crypto.Hash.SHA384
import Crypto.Hash.SHA512
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Util.asn1 import DerSequence
import ecdsa
from jose.constants import ALGORITHMS
from jose.exceptions import JWKError
from jose.utils import base64url_decode
from jose.utils import constant_time_string_compare
# PyCryptodome's RSA module doesn't have PyCrypto's _RSAobj class
# Instead it has a class named RsaKey, which serves the same purpose.
if hasattr(RSA, '_RSAobj'):
_RSAKey = RSA._RSAobj
else:
_RSAKey = RSA.RsaKey
# Deal with integer compatibilities between Python 2 and 3.
# Using `from builtins import int` is not supported on AppEngine.
if sys.version_info > (3,):
long = int
def int_arr_to_long(arr):
return long(''.join(["%02x" % byte for byte in arr]), 16)
def base64_to_long(data):
if isinstance(data, six.text_type):
data = data.encode("ascii")
# urlsafe_b64decode will happily convert b64encoded data
_d = base64.urlsafe_b64decode(bytes(data) + b'==')
return int_arr_to_long(struct.unpack('%sB' % len(_d), _d))
def construct(key_data, algorithm=None):
"""
Construct a Key object for the given algorithm with the given
key_data.
"""
# Allow for pulling the algorithm off of the passed in jwk.
if not algorithm and isinstance(key_data, dict):
algorithm = key_data.get('alg', None)
if not algorithm:
raise JWKError('Unable to find a algorithm for key: %s' % key_data)
if algorithm in ALGORITHMS.HMAC:
return HMACKey(key_data, algorithm)
if algorithm in ALGORITHMS.RSA:
return RSAKey(key_data, algorithm)
if algorithm in ALGORITHMS.EC:
return ECKey(key_data, algorithm)
def get_algorithm_object(algorithm):
algorithms = {
ALGORITHMS.HS256: HMACKey.SHA256,
ALGORITHMS.HS384: HMACKey.SHA384,
ALGORITHMS.HS512: HMACKey.SHA512,
ALGORITHMS.RS256: RSAKey.SHA256,
ALGORITHMS.RS384: RSAKey.SHA384,
ALGORITHMS.RS512: RSAKey.SHA512,
ALGORITHMS.ES256: ECKey.SHA256,
ALGORITHMS.ES384: ECKey.SHA384,
ALGORITHMS.ES512: ECKey.SHA512,
}
return algorithms.get(algorithm, None)
class Key(object):
"""
A simple interface for implementing JWK keys.
"""
prepared_key = None
hash_alg = None
def _process_jwk(self, jwk_dict):
raise NotImplementedError()
def sign(self, msg):
raise NotImplementedError()
def verify(self, msg, sig):
raise NotImplementedError()
class HMACKey(Key):
"""
Performs signing and verification operations using HMAC
and the specified hash function.
"""
SHA256 = hashlib.sha256
SHA384 = hashlib.sha384
SHA512 = hashlib.sha512
valid_hash_algs = ALGORITHMS.HMAC
prepared_key = None
hash_alg = None
def __init__(self, key, algorithm):
if algorithm not in self.valid_hash_algs:
raise JWKError('hash_alg: %s is not a valid hash algorithm' % algorithm)
self.hash_alg = get_algorithm_object(algorithm)
if isinstance(key, dict):
self.prepared_key = self._process_jwk(key)
return
if not isinstance(key, six.string_types) and not isinstance(key, bytes):
raise JWKError('Expecting a string- or bytes-formatted key.')
if isinstance(key, six.text_type):
key = key.encode('utf-8')
invalid_strings = [
b'-----BEGIN PUBLIC KEY-----',
b'-----BEGIN CERTIFICATE-----',
b'ssh-rsa'
]
if any([string_value in key for string_value in invalid_strings]):
raise JWKError(
'The specified key is an asymmetric key or x509 certificate and'
' should not be used as an HMAC secret.')
self.prepared_key = key
def _process_jwk(self, jwk_dict):
if not jwk_dict.get('kty') == 'oct':
raise JWKError("Incorrect key type. Expected: 'oct', Recieved: %s" % jwk_dict.get('kty'))
k = jwk_dict.get('k')
k = k.encode('utf-8')
k = bytes(k)
k = base64url_decode(k)
return k
def sign(self, msg):
return hmac.new(self.prepared_key, msg, self.hash_alg).digest()
def verify(self, msg, sig):
return constant_time_string_compare(sig, self.sign(msg))
class RSAKey(Key):
"""
Performs signing and verification operations using
RSASSA-PKCS-v1_5 and the specified hash function.
This class requires PyCrypto package to be installed.
This is based off of the implementation in PyJWT 0.3.2
"""
SHA256 = Crypto.Hash.SHA256
SHA384 = Crypto.Hash.SHA384
SHA512 = Crypto.Hash.SHA512
valid_hash_algs = ALGORITHMS.RSA
prepared_key = None
hash_alg = None
def __init__(self, key, algorithm):
if algorithm not in self.valid_hash_algs:
raise JWKError('hash_alg: %s is not a valid hash algorithm' % algorithm)
self.hash_alg = get_algorithm_object(algorithm)
if isinstance(key, _RSAKey):
self.prepared_key = key
return
if isinstance(key, dict):
self._process_jwk(key)
return
if isinstance(key, six.string_types):
if isinstance(key, six.text_type):
key = key.encode('utf-8')
if key.startswith(b'-----BEGIN CERTIFICATE-----'):
try:
self._process_cert(key)
except Exception as e:
raise JWKError(e)
return
try:
self.prepared_key = RSA.importKey(key)
except Exception as e:
raise JWKError(e)
return
raise JWKError('Unable to parse an RSA_JWK from key: %s' % key)
def _process_jwk(self, jwk_dict):
if not jwk_dict.get('kty') == 'RSA':
raise JWKError("Incorrect key type. Expected: 'RSA', Recieved: %s" % jwk_dict.get('kty'))
e = base64_to_long(jwk_dict.get('e', 256))
n = base64_to_long(jwk_dict.get('n'))
self.prepared_key = RSA.construct((n, e))
return self.prepared_key
def _process_cert(self, key):
pemLines = key.replace(b' ', b'').split()
certDer = base64url_decode(b''.join(pemLines[1:-1]))
certSeq = DerSequence()
certSeq.decode(certDer)
tbsSeq = DerSequence()
tbsSeq.decode(certSeq[0])
self.prepared_key = RSA.importKey(tbsSeq[6])
return
def sign(self, msg):
try:
return PKCS1_v1_5.new(self.prepared_key).sign(self.hash_alg.new(msg))
except Exception as e:
raise JWKError(e)
def verify(self, msg, sig):
try:
return PKCS1_v1_5.new(self.prepared_key).verify(self.hash_alg.new(msg), sig)
except Exception as e:
raise JWKError(e)
class ECKey(Key):
"""
Performs signing and verification operations using
ECDSA and the specified hash function
This class requires the ecdsa package to be installed.
This is based off of the implementation in PyJWT 0.3.2
"""
SHA256 = hashlib.sha256
SHA384 = hashlib.sha384
SHA512 = hashlib.sha512
valid_hash_algs = ALGORITHMS.EC
curve_map = {
SHA256: ecdsa.curves.NIST256p,
SHA384: ecdsa.curves.NIST384p,
SHA512: ecdsa.curves.NIST521p,
}
prepared_key = None
hash_alg = None
curve = None
def __init__(self, key, algorithm):
if algorithm not in self.valid_hash_algs:
raise JWKError('hash_alg: %s is not a valid hash algorithm' % algorithm)
self.hash_alg = get_algorithm_object(algorithm)
self.curve = self.curve_map.get(self.hash_alg)
if isinstance(key, (ecdsa.SigningKey, ecdsa.VerifyingKey)):
self.prepared_key = key
return
if isinstance(key, dict):
self.prepared_key = self._process_jwk(key)
return
if isinstance(key, six.string_types):
if isinstance(key, six.text_type):
key = key.encode('utf-8')
# Attempt to load key. We don't know if it's
# a Signing Key or a Verifying Key, so we try
# the Verifying Key first.
try:
key = ecdsa.VerifyingKey.from_pem(key)
except ecdsa.der.UnexpectedDER:
key = ecdsa.SigningKey.from_pem(key)
except Exception as e:
raise JWKError(e)
self.prepared_key = key
return
raise JWKError('Unable to parse an ECKey from key: %s' % key)
def _process_jwk(self, jwk_dict):
if not jwk_dict.get('kty') == 'EC':
raise JWKError("Incorrect key type. Expected: 'EC', Recieved: %s" % jwk_dict.get('kty'))
x = base64_to_long(jwk_dict.get('x'))
y = base64_to_long(jwk_dict.get('y'))
if not ecdsa.ecdsa.point_is_valid(self.curve.generator, x, y):
raise JWKError("Point: %s, %s is not a valid point" % (x, y))
point = ecdsa.ellipticcurve.Point(self.curve.curve, x, y, self.curve.order)
verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point, self.curve)
return verifying_key
def sign(self, msg):
return self.prepared_key.sign(msg, hashfunc=self.hash_alg, sigencode=ecdsa.util.sigencode_string)
def verify(self, msg, sig):
try:
return self.prepared_key.verify(sig, msg, hashfunc=self.hash_alg, sigdecode=ecdsa.util.sigdecode_string)
except:
return False
| ./CrossVul/dataset_final_sorted/CWE-361/py/good_5244_0 |
crossvul-python_data_bad_5244_0 |
import base64
import hashlib
import hmac
import struct
import six
import sys
import Crypto.Hash.SHA256
import Crypto.Hash.SHA384
import Crypto.Hash.SHA512
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Util.asn1 import DerSequence
import ecdsa
from jose.constants import ALGORITHMS
from jose.exceptions import JWKError
from jose.utils import base64url_decode
# PyCryptodome's RSA module doesn't have PyCrypto's _RSAobj class
# Instead it has a class named RsaKey, which serves the same purpose.
if hasattr(RSA, '_RSAobj'):
_RSAKey = RSA._RSAobj
else:
_RSAKey = RSA.RsaKey
# Deal with integer compatibilities between Python 2 and 3.
# Using `from builtins import int` is not supported on AppEngine.
if sys.version_info > (3,):
long = int
def int_arr_to_long(arr):
return long(''.join(["%02x" % byte for byte in arr]), 16)
def base64_to_long(data):
if isinstance(data, six.text_type):
data = data.encode("ascii")
# urlsafe_b64decode will happily convert b64encoded data
_d = base64.urlsafe_b64decode(bytes(data) + b'==')
return int_arr_to_long(struct.unpack('%sB' % len(_d), _d))
def construct(key_data, algorithm=None):
"""
Construct a Key object for the given algorithm with the given
key_data.
"""
# Allow for pulling the algorithm off of the passed in jwk.
if not algorithm and isinstance(key_data, dict):
algorithm = key_data.get('alg', None)
if not algorithm:
raise JWKError('Unable to find a algorithm for key: %s' % key_data)
if algorithm in ALGORITHMS.HMAC:
return HMACKey(key_data, algorithm)
if algorithm in ALGORITHMS.RSA:
return RSAKey(key_data, algorithm)
if algorithm in ALGORITHMS.EC:
return ECKey(key_data, algorithm)
def get_algorithm_object(algorithm):
algorithms = {
ALGORITHMS.HS256: HMACKey.SHA256,
ALGORITHMS.HS384: HMACKey.SHA384,
ALGORITHMS.HS512: HMACKey.SHA512,
ALGORITHMS.RS256: RSAKey.SHA256,
ALGORITHMS.RS384: RSAKey.SHA384,
ALGORITHMS.RS512: RSAKey.SHA512,
ALGORITHMS.ES256: ECKey.SHA256,
ALGORITHMS.ES384: ECKey.SHA384,
ALGORITHMS.ES512: ECKey.SHA512,
}
return algorithms.get(algorithm, None)
class Key(object):
"""
A simple interface for implementing JWK keys.
"""
prepared_key = None
hash_alg = None
def _process_jwk(self, jwk_dict):
raise NotImplementedError()
def sign(self, msg):
raise NotImplementedError()
def verify(self, msg, sig):
raise NotImplementedError()
class HMACKey(Key):
    """
    Performs signing and verification operations using HMAC
    and the specified hash function.
    """
    SHA256 = hashlib.sha256
    SHA384 = hashlib.sha384
    SHA512 = hashlib.sha512

    # HS256/HS384/HS512 algorithm identifiers accepted by this key type.
    valid_hash_algs = ALGORITHMS.HMAC

    prepared_key = None
    hash_alg = None

    def __init__(self, key, algorithm):
        """Prepare an HMAC secret from a JWK dict or a string/bytes secret.

        Raises:
            JWKError: If the algorithm is unsupported, the key has the wrong
                type, or the key looks like asymmetric key material.
        """
        if algorithm not in self.valid_hash_algs:
            raise JWKError('hash_alg: %s is not a valid hash algorithm' % algorithm)
        self.hash_alg = get_algorithm_object(algorithm)

        if isinstance(key, dict):
            self.prepared_key = self._process_jwk(key)
            return

        if not isinstance(key, six.string_types) and not isinstance(key, bytes):
            raise JWKError('Expecting a string- or bytes-formatted key.')

        if isinstance(key, six.text_type):
            key = key.encode('utf-8')

        # Refuse keys that look like asymmetric key material: accepting a
        # public key/certificate as an HMAC secret enables
        # algorithm-confusion attacks (RS256 token verified as HS256).
        invalid_strings = [
            b'-----BEGIN PUBLIC KEY-----',
            b'-----BEGIN CERTIFICATE-----',
            b'ssh-rsa'
        ]

        if any(string_value in key for string_value in invalid_strings):
            raise JWKError(
                'The specified key is an asymmetric key or x509 certificate and'
                ' should not be used as an HMAC secret.')

        self.prepared_key = key

    def _process_jwk(self, jwk_dict):
        """Extract and base64url-decode the 'k' member of an 'oct' JWK."""
        if not jwk_dict.get('kty') == 'oct':
            raise JWKError("Incorrect key type. Expected: 'oct', Recieved: %s" % jwk_dict.get('kty'))

        k = jwk_dict.get('k')
        k = k.encode('utf-8')
        k = bytes(k)
        k = base64url_decode(k)

        return k

    def sign(self, msg):
        """Return the HMAC digest of *msg* under the prepared secret."""
        return hmac.new(self.prepared_key, msg, self.hash_alg).digest()

    def verify(self, msg, sig):
        """Constant-time check of *sig* against the expected HMAC of *msg*.

        A plain ``sig == self.sign(msg)`` short-circuits on the first
        differing byte and leaks timing information that lets an attacker
        forge MACs byte by byte; compare in constant time instead.
        """
        expected = self.sign(msg)
        try:
            return hmac.compare_digest(expected, sig)
        except AttributeError:
            # Python < 2.7.7 / < 3.3 lacks compare_digest: manual
            # constant-time fallback that examines every byte.
            if len(expected) != len(sig):
                return False
            result = 0
            for x, y in zip(bytearray(expected), bytearray(sig)):
                result |= x ^ y
            return result == 0
class RSAKey(Key):
    """
    Performs signing and verification operations using
    RSASSA-PKCS-v1_5 and the specified hash function.
    This class requires PyCrypto package to be installed.
    This is based off of the implementation in PyJWT 0.3.2
    """
    SHA256 = Crypto.Hash.SHA256
    SHA384 = Crypto.Hash.SHA384
    SHA512 = Crypto.Hash.SHA512

    # RS256/RS384/RS512 algorithm identifiers accepted by this key type.
    valid_hash_algs = ALGORITHMS.RSA

    # Parsed RSA key object once loaded.
    prepared_key = None
    # Crypto.Hash module matching the chosen algorithm.
    hash_alg = None

    def __init__(self, key, algorithm):
        # Accepts an already-constructed RSA key object, a JWK dict, or a
        # PEM/DER text blob (certificate or key). Any parse failure is
        # wrapped in JWKError so callers only catch one exception type.
        if algorithm not in self.valid_hash_algs:
            raise JWKError('hash_alg: %s is not a valid hash algorithm' % algorithm)
        self.hash_alg = get_algorithm_object(algorithm)

        if isinstance(key, _RSAKey):
            self.prepared_key = key
            return

        if isinstance(key, dict):
            self._process_jwk(key)
            return

        if isinstance(key, six.string_types):
            if isinstance(key, six.text_type):
                key = key.encode('utf-8')

            if key.startswith(b'-----BEGIN CERTIFICATE-----'):
                try:
                    self._process_cert(key)
                except Exception as e:
                    raise JWKError(e)
                return

            try:
                self.prepared_key = RSA.importKey(key)
            except Exception as e:
                raise JWKError(e)
            return

        raise JWKError('Unable to parse an RSA_JWK from key: %s' % key)

    def _process_jwk(self, jwk_dict):
        # Build a public RSA key from the JWK 'n' (modulus) / 'e' (exponent).
        if not jwk_dict.get('kty') == 'RSA':
            raise JWKError("Incorrect key type. Expected: 'RSA', Recieved: %s" % jwk_dict.get('kty'))

        # NOTE(review): when 'e' is absent the integer default 256 is handed
        # to base64_to_long, which expects base64 text — confirm whether a
        # missing exponent should instead raise an error.
        e = base64_to_long(jwk_dict.get('e', 256))
        n = base64_to_long(jwk_dict.get('n'))

        self.prepared_key = RSA.construct((n, e))
        return self.prepared_key

    def _process_cert(self, key):
        # Strip the PEM armor, base64-decode the DER body, then pull the
        # public-key element out of the TBSCertificate sequence.
        # NOTE(review): index 6 presumably addresses subjectPublicKeyInfo —
        # verify against RFC 5280 for certificates with optional fields.
        pemLines = key.replace(b' ', b'').split()
        certDer = base64url_decode(b''.join(pemLines[1:-1]))
        certSeq = DerSequence()
        certSeq.decode(certDer)
        tbsSeq = DerSequence()
        tbsSeq.decode(certSeq[0])
        self.prepared_key = RSA.importKey(tbsSeq[6])
        return

    def sign(self, msg):
        # PKCS#1 v1.5 signature over hash_alg(msg).
        try:
            return PKCS1_v1_5.new(self.prepared_key).sign(self.hash_alg.new(msg))
        except Exception as e:
            raise JWKError(e)

    def verify(self, msg, sig):
        # PKCS1_v1_5.verify returns a boolean; failures become JWKError.
        try:
            return PKCS1_v1_5.new(self.prepared_key).verify(self.hash_alg.new(msg), sig)
        except Exception as e:
            raise JWKError(e)
class ECKey(Key):
    """
    Performs signing and verification operations using
    ECDSA and the specified hash function
    This class requires the ecdsa package to be installed.
    This is based off of the implementation in PyJWT 0.3.2
    """
    SHA256 = hashlib.sha256
    SHA384 = hashlib.sha384
    SHA512 = hashlib.sha512

    # ES256/ES384/ES512 algorithm identifiers accepted by this key type.
    valid_hash_algs = ALGORITHMS.EC

    # NIST curve paired with each supported digest size.
    curve_map = {
        SHA256: ecdsa.curves.NIST256p,
        SHA384: ecdsa.curves.NIST384p,
        SHA512: ecdsa.curves.NIST521p,
    }

    prepared_key = None
    hash_alg = None
    curve = None

    def __init__(self, key, algorithm):
        """Load an EC key from a key object, a JWK dict, or a PEM string.

        Raises:
            JWKError: If the algorithm is unsupported or the key material
                cannot be parsed.
        """
        if algorithm not in self.valid_hash_algs:
            raise JWKError('hash_alg: %s is not a valid hash algorithm' % algorithm)
        self.hash_alg = get_algorithm_object(algorithm)
        self.curve = self.curve_map.get(self.hash_alg)

        if isinstance(key, (ecdsa.SigningKey, ecdsa.VerifyingKey)):
            self.prepared_key = key
            return

        if isinstance(key, dict):
            self.prepared_key = self._process_jwk(key)
            return

        if isinstance(key, six.string_types):
            if isinstance(key, six.text_type):
                key = key.encode('utf-8')

            # Attempt to load key. We don't know if it's
            # a Signing Key or a Verifying Key, so we try
            # the Verifying Key first.
            try:
                key = ecdsa.VerifyingKey.from_pem(key)
            except ecdsa.der.UnexpectedDER:
                key = ecdsa.SigningKey.from_pem(key)
            except Exception as e:
                raise JWKError(e)

            self.prepared_key = key
            return

        raise JWKError('Unable to parse an ECKey from key: %s' % key)

    def _process_jwk(self, jwk_dict):
        """Build a verifying key from the public point ('x', 'y') of an EC JWK."""
        if not jwk_dict.get('kty') == 'EC':
            raise JWKError("Incorrect key type. Expected: 'EC', Recieved: %s" % jwk_dict.get('kty'))

        x = base64_to_long(jwk_dict.get('x'))
        y = base64_to_long(jwk_dict.get('y'))

        # Reject points not on the curve (defends against invalid-curve input).
        if not ecdsa.ecdsa.point_is_valid(self.curve.generator, x, y):
            raise JWKError("Point: %s, %s is not a valid point" % (x, y))

        point = ecdsa.ellipticcurve.Point(self.curve.curve, x, y, self.curve.order)
        verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point, self.curve)

        return verifying_key

    def sign(self, msg):
        """Return an ECDSA signature over *msg* (requires a SigningKey)."""
        return self.prepared_key.sign(msg, hashfunc=self.hash_alg, sigencode=ecdsa.util.sigencode_string)

    def verify(self, msg, sig):
        """Return True iff *sig* is a valid ECDSA signature over *msg*."""
        try:
            return self.prepared_key.verify(sig, msg, hashfunc=self.hash_alg, sigdecode=ecdsa.util.sigdecode_string)
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; only ordinary errors mean "invalid signature".
            return False
| ./CrossVul/dataset_final_sorted/CWE-361/py/bad_5244_0 |
crossvul-python_data_good_5244_1 |
import base64
import hmac
def calculate_at_hash(access_token, hash_alg):
    """Compute the OIDC ``at_hash`` value for an access token.

    Per http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken the
    value is the base64url encoding of the left-most half of the hash of the
    ASCII representation of the access token, where the hash algorithm is the
    one named by the ID Token's JOSE ``alg`` header (e.g. SHA-256 for RS256).

    Args:
        access_token (str): An access token string.
        hash_alg (callable): A callable returning a hash object, e.g. hashlib.sha256
    """
    digest = hash_alg(access_token.encode('utf-8')).digest()
    left_half = digest[:len(digest) // 2]
    return base64url_encode(left_half).decode('utf-8')
def base64url_decode(input):
    """Helper method to base64url_decode a value.

    Restores the stripped '=' padding before delegating to the stdlib
    decoder.

    Args:
        input (bytes or str): A base64url_encoded value to decode. The
            docstring previously promised str support but the code only
            accepted bytes; str is now encoded as ASCII first (the
            base64url alphabet is pure ASCII), so bytes callers are
            unaffected.

    Returns:
        bytes: The decoded octets.
    """
    if isinstance(input, str):
        input = input.encode('ascii')
    rem = len(input) % 4
    if rem > 0:
        input += b'=' * (4 - rem)
    return base64.urlsafe_b64decode(input)
def base64url_encode(input):
    """Base64url-encode *input*, dropping the trailing '=' padding.

    Args:
        input (bytes): Raw octets to encode.

    Returns:
        bytes: The unpadded base64url encoding.
    """
    padded = base64.urlsafe_b64encode(input)
    # '=' only ever appears as trailing padding in b64 output.
    return padded.rstrip(b'=')
def timedelta_total_seconds(delta):
    """Return the whole seconds contained in *delta* (microseconds ignored).

    Args:
        delta (timedelta): A timedelta to convert to seconds.
    """
    seconds_per_day = 24 * 60 * 60
    return seconds_per_day * delta.days + delta.seconds
def constant_time_string_compare(a, b):
    """Compare two strings in constant time.

    Uses hmac.compare_digest where available (Python 2.7.7+/3.3+) and falls
    back to a manual constant-time loop on older interpreters.

    Args:
        a (str): A string to compare
        b (str): A string to compare
    """
    try:
        return hmac.compare_digest(a, b)
    except AttributeError:
        if len(a) != len(b):
            return False
        mismatch = 0
        for ch_a, ch_b in zip(a, b):
            mismatch |= ord(ch_a) ^ ord(ch_b)
        return mismatch == 0
| ./CrossVul/dataset_final_sorted/CWE-361/py/good_5244_1 |
crossvul-python_data_bad_5244_1 |
import base64
def calculate_at_hash(access_token, hash_alg):
    """Compute the ``at_hash`` claim value for an access token.

    As described in
    http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken: hash
    the ASCII form of the token with the algorithm from the ID Token's JOSE
    ``alg`` header, keep the left-most half of the digest, and base64url
    encode it (case sensitive).

    Args:
        access_token (str): An access token string.
        hash_alg (callable): A callable returning a hash object, e.g. hashlib.sha256
    """
    ascii_bytes = access_token.encode('utf-8')
    full_digest = hash_alg(ascii_bytes).digest()
    half = int(len(full_digest) / 2)
    at_hash = base64url_encode(full_digest[:half])
    return at_hash.decode('utf-8')
def base64url_decode(input):
    """Decode a base64url value, re-adding any stripped '=' padding.

    Args:
        input (bytes): A base64url_encoded value to decode.
    """
    # Number of padding bytes needed to reach a multiple of 4.
    missing = -len(input) % 4
    if missing:
        input = input + b'=' * missing
    return base64.urlsafe_b64decode(input)
def base64url_encode(input):
    """Base64url-encode *input* without trailing '=' padding.

    Args:
        input (bytes): Raw bytes to encode.
    """
    # translate(None, b'=') deletes every '=' in one C-level pass; '='
    # only appears as trailing padding in b64 output.
    return base64.urlsafe_b64encode(input).translate(None, b'=')
def timedelta_total_seconds(delta):
    """Convert a timedelta to its whole-second count (microseconds ignored).

    Args:
        delta (timedelta): A timedelta to convert to seconds.
    """
    # 86400 == 24 * 60 * 60 seconds per day.
    return (delta.days * 86400) + delta.seconds
| ./CrossVul/dataset_final_sorted/CWE-361/py/bad_5244_1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.