Dataset schema (per-column dtype and observed range):

| column | dtype | range / distinct values |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | lengths 4 to 119 |
| file_path | string | lengths 14 to 227 |
| content | string | lengths 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 distinct value |
| extension | string | 14 distinct values |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | lengths 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 distinct values |
| repo_extraction_date | string | 433 distinct values |
id: 8,800 | file_name: utils.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/utils.py
"""
oauthlib.utils
~~~~~~~~~~~~~~
This module contains utility methods used by various parts of the OAuth 2 spec.
"""
import datetime
import os
from urllib.parse import quote, urlparse
from oauthlib.common import urldecode
def list_to_scope(scope):
"""Convert a list of scopes to a space separated string."""
if isinstance(scope, str) or scope is None:
return scope
elif isinstance(scope, (set, tuple, list)):
return " ".join([str(s) for s in scope])
else:
raise ValueError("Invalid scope (%s), must be string, tuple, set, or list." % scope)
def scope_to_list(scope):
"""Convert a space separated string to a list of scopes."""
if isinstance(scope, (tuple, list, set)):
return [str(s) for s in scope]
elif scope is None:
return None
else:
return scope.strip().split(" ")
def params_from_uri(uri):
params = dict(urldecode(urlparse(uri).query))
if 'scope' in params:
params['scope'] = scope_to_list(params['scope'])
return params
def host_from_uri(uri):
"""Extract hostname and port from URI.
Will use default port for HTTP and HTTPS if none is present in the URI.
"""
default_ports = {
'HTTP': '80',
'HTTPS': '443',
}
sch, netloc, path, par, query, fra = urlparse(uri)
if ':' in netloc:
netloc, port = netloc.split(':', 1)
else:
port = default_ports.get(sch.upper())
return netloc, port
def escape(u):
"""Escape a string in an OAuth-compatible fashion.
TODO: verify whether this can in fact be used for OAuth 2
"""
if not isinstance(u, str):
raise ValueError('Only unicode objects are escapable.')
return quote(u.encode('utf-8'), safe=b'~')
def generate_age(issue_time):
"""Generate a age parameter for MAC authentication draft 00."""
td = datetime.datetime.now() - issue_time
age = (td.microseconds + (td.seconds + td.days * 24 * 3600)
* 10 ** 6) / 10 ** 6
return str(age)
def is_secure_transport(uri):
"""Check if the uri is over ssl."""
if os.environ.get('OAUTHLIB_INSECURE_TRANSPORT'):
return True
return uri.lower().startswith('https://')
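
# A minimal usage sketch of the helpers above; values are illustrative
# (is_secure_transport assumes OAUTHLIB_INSECURE_TRANSPORT is unset).
# >>> list_to_scope(['read', 'write'])
# 'read write'
# >>> scope_to_list('read write')
# ['read', 'write']
# >>> host_from_uri('https://example.com:8080/path')
# ('example.com', '8080')
# >>> host_from_uri('https://example.com/path')
# ('example.com', '443')
# >>> is_secure_transport('http://example.com')
# False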

size: 2,207 | language: Python | extension: .py | total_lines: 62 | avg_line_length: 30.33871 | max_line_length: 92 | alphanum_fraction: 0.647363 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 8,801 | file_name: __init__.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/__init__.py
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
import functools
import logging
from .endpoints.base import BaseEndpoint, catch_errors_and_unavailability
from .errors import (
FatalClientError, OAuth2Error, ServerError, TemporarilyUnavailableError,
)
log = logging.getLogger(__name__)

size: 404 | language: Python | extension: .py | total_lines: 13 | avg_line_length: 29.538462 | max_line_length: 76 | alphanum_fraction: 0.778351 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 8,802 | file_name: backend_application.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/clients/backend_application.py
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from ..parameters import prepare_token_request
from .base import Client
class BackendApplicationClient(Client):
"""A public client utilizing the client credentials grant workflow.
The client can request an access token using only its client
credentials (or other supported means of authentication) when the
client is requesting access to the protected resources under its
control, or those of another resource owner which has been previously
arranged with the authorization server (the method of which is beyond
the scope of this specification).
The client credentials grant type MUST only be used by confidential
clients.
Since the client authentication is used as the authorization grant,
no additional authorization request is needed.
"""
grant_type = 'client_credentials'
def prepare_request_body(self, body='', scope=None,
include_client_id=False, **kwargs):
"""Add the client credentials to the request body.
The client makes a request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format per `Appendix B`_ in the HTTP request entity-body:
:param body: Existing request body (URL encoded string) to embed parameters
into. This may contain extra parameters. Default ''.
:param scope: The scope of the access request as described by
`Section 3.3`_.
:param include_client_id: `True` to send the `client_id` in the
body of the upstream request. This is required
if the client is not authenticating with the
authorization server as described in
`Section 3.2.1`_. False otherwise (default).
:type include_client_id: Boolean
:param kwargs: Extra credentials to include in the token request.
The client MUST authenticate with the authorization server as
described in `Section 3.2.1`_.
The prepared body will include all provided credentials as well as
the ``grant_type`` parameter set to ``client_credentials``::
>>> from oauthlib.oauth2 import BackendApplicationClient
>>> client = BackendApplicationClient('your_id')
>>> client.prepare_request_body(scope=['hello', 'world'])
'grant_type=client_credentials&scope=hello+world'
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
"""
kwargs['client_id'] = self.client_id
kwargs['include_client_id'] = include_client_id
scope = self.scope if scope is None else scope
return prepare_token_request(self.grant_type, body=body,
scope=scope, **kwargs)
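
# A minimal sketch of a full client credentials token request; the token
# endpoint URL, the client secret, and the use of `requests` are
# assumptions, not part of this module.
# >>> import requests
# >>> client = BackendApplicationClient('your_id')
# >>> body = client.prepare_request_body(scope=['hello'])
# >>> resp = requests.post('https://provider.example/token', data=body,
# ...                      auth=('your_id', 'your_secret'))
# >>> client.parse_request_body_response(resp.text)  # populates client.token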

size: 3,223 | language: Python | extension: .py | total_lines: 57 | avg_line_length: 46.140351 | max_line_length: 83 | alphanum_fraction: 0.656716 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 8,803 | file_name: web_application.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/clients/web_application.py
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
import warnings
from ..parameters import (
parse_authorization_code_response, prepare_grant_uri,
prepare_token_request,
)
from .base import Client
class WebApplicationClient(Client):
"""A client utilizing the authorization code grant workflow.
A web application is a confidential client running on a web
server. Resource owners access the client via an HTML user
interface rendered in a user-agent on the device used by the
resource owner. The client credentials as well as any access
token issued to the client are stored on the web server and are
not exposed to or accessible by the resource owner.
The authorization code grant type is used to obtain both access
tokens and refresh tokens and is optimized for confidential clients.
As a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server.
"""
grant_type = 'authorization_code'
def __init__(self, client_id, code=None, **kwargs):
super().__init__(client_id, **kwargs)
self.code = code
def prepare_request_uri(self, uri, redirect_uri=None, scope=None,
state=None, **kwargs):
"""Prepare the authorization code request URI
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
:param redirect_uri: OPTIONAL. The redirect URI must be an absolute URI
and it should have been registered with the OAuth
provider prior to use. As described in `Section 3.1.2`_.
:param scope: OPTIONAL. The scope of the access request as described by
`Section 3.3`_. These may be any string but are commonly
URIs or various categories such as ``videos`` or ``documents``.
:param state: RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
:param kwargs: Extra arguments to include in the request URI.
In addition to supplied parameters, OAuthLib will append the ``client_id``
that was provided in the constructor as well as the mandatory ``response_type``
argument, set to ``code``::
>>> from oauthlib.oauth2 import WebApplicationClient
>>> client = WebApplicationClient('your_id')
>>> client.prepare_request_uri('https://example.com')
'https://example.com?client_id=your_id&response_type=code'
>>> client.prepare_request_uri('https://example.com', redirect_uri='https://a.b/callback')
'https://example.com?client_id=your_id&response_type=code&redirect_uri=https%3A%2F%2Fa.b%2Fcallback'
>>> client.prepare_request_uri('https://example.com', scope=['profile', 'pictures'])
'https://example.com?client_id=your_id&response_type=code&scope=profile+pictures'
>>> client.prepare_request_uri('https://example.com', foo='bar')
'https://example.com?client_id=your_id&response_type=code&foo=bar'
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
"""
scope = self.scope if scope is None else scope
return prepare_grant_uri(uri, self.client_id, 'code',
redirect_uri=redirect_uri, scope=scope, state=state, **kwargs)
def prepare_request_body(self, code=None, redirect_uri=None, body='',
include_client_id=True, **kwargs):
"""Prepare the access token request body.
The client makes a request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
:param code: REQUIRED. The authorization code received from the
authorization server.
:param redirect_uri: REQUIRED, if the "redirect_uri" parameter was included in the
authorization request as described in `Section 4.1.1`_, and their
values MUST be identical.
:param body: Existing request body (URL encoded string) to embed parameters
into. This may contain extra parameters. Default ''.
:param include_client_id: `True` (default) to send the `client_id` in the
body of the upstream request. This is required
if the client is not authenticating with the
authorization server as described in `Section 3.2.1`_.
:type include_client_id: Boolean
:param kwargs: Extra parameters to include in the token request.
In addition OAuthLib will add the ``grant_type`` parameter set to
``authorization_code``.
If the client type is confidential or the client was issued client
credentials (or assigned other authentication requirements), the
client MUST authenticate with the authorization server as described
in `Section 3.2.1`_::
>>> from oauthlib.oauth2 import WebApplicationClient
>>> client = WebApplicationClient('your_id')
>>> client.prepare_request_body(code='sh35ksdf09sf')
'grant_type=authorization_code&code=sh35ksdf09sf'
>>> client.prepare_request_body(code='sh35ksdf09sf', foo='bar')
'grant_type=authorization_code&code=sh35ksdf09sf&foo=bar'
`Section 3.2.1`_ also states:
In the "authorization_code" "grant_type" request to the token
endpoint, an unauthenticated client MUST send its "client_id" to
prevent itself from inadvertently accepting a code intended for a
client with a different "client_id". This protects the client from
substitution of the authentication code. (It provides no additional
security for the protected resource.)
.. _`Section 4.1.1`: https://tools.ietf.org/html/rfc6749#section-4.1.1
.. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
"""
code = code or self.code
if 'client_id' in kwargs:
warnings.warn("`client_id` has been deprecated in favor of "
"`include_client_id`, a boolean value which will "
"include the already configured `self.client_id`.",
DeprecationWarning)
if kwargs['client_id'] != self.client_id:
raise ValueError("`client_id` was supplied as an argument, but "
"it does not match `self.client_id`")
kwargs['client_id'] = self.client_id
kwargs['include_client_id'] = include_client_id
return prepare_token_request(self.grant_type, code=code, body=body,
redirect_uri=redirect_uri, **kwargs)
def parse_request_uri_response(self, uri, state=None):
"""Parse the URI query for code and state.
If the resource owner grants the access request, the authorization
server issues an authorization code and delivers it to the client by
adding the following parameters to the query component of the
redirection URI using the "application/x-www-form-urlencoded" format:
:param uri: The callback URI that resulted from the user being redirected
back from the provider to you, the client.
:param state: The state provided in the authorization request.
**code**
The authorization code generated by the authorization server.
The authorization code MUST expire shortly after it is issued
to mitigate the risk of leaks. A maximum authorization code
lifetime of 10 minutes is RECOMMENDED. The client MUST NOT
use the authorization code more than once. If an authorization
code is used more than once, the authorization server MUST deny
the request and SHOULD revoke (when possible) all tokens
previously issued based on that authorization code.
The authorization code is bound to the client identifier and
redirection URI.
**state**
If the "state" parameter was present in the authorization request.
This method is mainly intended to enforce strict state checking with
the added benefit of easily extracting parameters from the URI::
>>> from oauthlib.oauth2 import WebApplicationClient
>>> client = WebApplicationClient('your_id')
>>> uri = 'https://example.com/callback?code=sdfkjh345&state=sfetw45'
>>> client.parse_request_uri_response(uri, state='sfetw45')
{'state': 'sfetw45', 'code': 'sdfkjh345'}
>>> client.parse_request_uri_response(uri, state='other')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/oauth2/rfc6749/__init__.py", line 357, in parse_request_uri_response
back from the provider to you, the client.
File "oauthlib/oauth2/rfc6749/parameters.py", line 153, in parse_authorization_code_response
raise MismatchingStateError()
oauthlib.oauth2.rfc6749.errors.MismatchingStateError
"""
response = parse_authorization_code_response(uri, state=state)
self.populate_code_attributes(response)
return response
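
# A minimal end-to-end sketch of the authorization code flow with this
# client; the endpoint URLs, callback URI, and code/state values are
# illustrative assumptions.
# >>> client = WebApplicationClient('your_id')
# Step 1: redirect the resource owner to the authorization endpoint.
# >>> client.prepare_request_uri('https://provider.example/authorize',
# ...     redirect_uri='https://a.b/callback', state='xyz')
# Step 2: parse the redirect back; this stores the code on the client.
# >>> client.parse_request_uri_response(
# ...     'https://a.b/callback?code=sh35ksdf09sf&state=xyz', state='xyz')
# Step 3: exchange the stored code for an access token.
# >>> client.prepare_request_body(redirect_uri='https://a.b/callback')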

size: 10,615 | language: Python | extension: .py | total_lines: 167 | avg_line_length: 51.131737 | max_line_length: 112 | alphanum_fraction: 0.645493 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 8,804 | file_name: service_application.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/clients/service_application.py
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
import time
from oauthlib.common import to_unicode
from ..parameters import prepare_token_request
from .base import Client
class ServiceApplicationClient(Client):
"""A public client utilizing the JWT bearer grant.
JWT bearer tokens can be used to request an access token when a client
wishes to utilize an existing trust relationship, expressed through the
semantics of (and digital signature or keyed message digest calculated
over) the JWT, without a direct user approval step at the authorization
server.
This grant type does not involve an authorization step. It may be
used by both public and confidential clients.
"""
grant_type = 'urn:ietf:params:oauth:grant-type:jwt-bearer'
def __init__(self, client_id, private_key=None, subject=None, issuer=None,
audience=None, **kwargs):
"""Initalize a JWT client with defaults for implicit use later.
:param client_id: Client identifier given by the OAuth provider upon
registration.
:param private_key: Private key used for signing and encrypting.
Must be given as a string.
:param subject: The principal that is the subject of the JWT, i.e.
which user is the token requested on behalf of.
For example, ``foo@example.com``.
:param issuer: The JWT MUST contain an "iss" (issuer) claim that
contains a unique identifier for the entity that issued
the JWT. For example, ``your-client@provider.com``.
:param audience: A value identifying the authorization server as an
intended audience, e.g.
``https://provider.com/oauth2/token``.
:param kwargs: Additional arguments to pass to base client, such as
state and token. See ``Client.__init__.__doc__`` for
details.
"""
super().__init__(client_id, **kwargs)
self.private_key = private_key
self.subject = subject
self.issuer = issuer
self.audience = audience
def prepare_request_body(self,
private_key=None,
subject=None,
issuer=None,
audience=None,
expires_at=None,
issued_at=None,
extra_claims=None,
body='',
scope=None,
include_client_id=False,
**kwargs):
"""Create and add a JWT assertion to the request body.
:param private_key: Private key used for signing and encrypting.
Must be given as a string.
:param subject: (sub) The principal that is the subject of the JWT,
i.e. which user is the token requested on behalf of.
For example, ``foo@example.com``.
:param issuer: (iss) The JWT MUST contain an "iss" (issuer) claim that
contains a unique identifier for the entity that issued
the JWT. For example, ``your-client@provider.com``.
:param audience: (aud) A value identifying the authorization server as an
intended audience, e.g.
``https://provider.com/oauth2/token``.
:param expires_at: A unix expiration timestamp for the JWT. Defaults
to an hour from now, i.e. ``time.time() + 3600``.
:param issued_at: A unix timestamp of when the JWT was created.
Defaults to now, i.e. ``time.time()``.
:param extra_claims: A dict of additional claims to include in the JWT.
:param body: Existing request body (URL encoded string) to embed parameters
into. This may contain extra parameters. Default ''.
:param scope: The scope of the access request.
:param include_client_id: `True` to send the `client_id` in the
body of the upstream request. This is required
if the client is not authenticating with the
authorization server as described in
`Section 3.2.1`_. False otherwise (default).
:type include_client_id: Boolean
:param not_before: A unix timestamp after which the JWT may be used.
Not included unless provided. *
:param jwt_id: A unique JWT token identifier. Not included unless
provided. *
:param kwargs: Extra credentials to include in the token request.
Parameters marked with a `*` above are not explicit arguments in the
function signature, but are specially documented arguments for items
appearing in the generic `**kwargs` keyworded input.
The "scope" parameter may be used, as defined in the Assertion
Framework for OAuth 2.0 Client Authentication and Authorization Grants
[I-D.ietf-oauth-assertions] specification, to indicate the requested
scope.
Authentication of the client is optional, as described in
`Section 3.2.1`_ of OAuth 2.0 [RFC6749] and consequently, the
"client_id" is only needed when a form of client authentication that
relies on the parameter is used.
The following non-normative example demonstrates an Access Token
Request with a JWT as an authorization grant (with extra line breaks
for display purposes only):
.. code-block:: http
POST /token.oauth2 HTTP/1.1
Host: as.example.com
Content-Type: application/x-www-form-urlencoded
grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer
&assertion=eyJhbGciOiJFUzI1NiJ9.
eyJpc3Mi[...omitted for brevity...].
J9l-ZhwP[...omitted for brevity...]
.. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
"""
import jwt
key = private_key or self.private_key
if not key:
raise ValueError('An encryption key must be supplied to make JWT'
' token requests.')
claim = {
'iss': issuer or self.issuer,
'aud': audience or self.audience,
'sub': subject or self.subject,
'exp': int(expires_at or time.time() + 3600),
'iat': int(issued_at or time.time()),
}
for attr in ('iss', 'aud', 'sub'):
if claim[attr] is None:
raise ValueError(
'Claim must include %s but none was given.' % attr)
if 'not_before' in kwargs:
claim['nbf'] = kwargs.pop('not_before')
if 'jwt_id' in kwargs:
claim['jti'] = kwargs.pop('jwt_id')
claim.update(extra_claims or {})
assertion = jwt.encode(claim, key, 'RS256')
assertion = to_unicode(assertion)
kwargs['client_id'] = self.client_id
kwargs['include_client_id'] = include_client_id
scope = self.scope if scope is None else scope
return prepare_token_request(self.grant_type,
body=body,
assertion=assertion,
scope=scope,
**kwargs)
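
# A minimal sketch of preparing a JWT bearer grant request; the key file
# and claim values are illustrative assumptions, and PyJWT (plus a signing
# backend such as `cryptography`) must be installed for RS256.
# >>> with open('private.pem') as f:
# ...     key = f.read()
# >>> client = ServiceApplicationClient(
# ...     'your_id', private_key=key, subject='foo@example.com',
# ...     issuer='your-client@provider.com',
# ...     audience='https://provider.example/oauth2/token')
# >>> body = client.prepare_request_body()
# The body carries grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer
# and the signed `assertion` parameter.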

size: 7,810 | language: Python | extension: .py | total_lines: 145 | avg_line_length: 39.117241 | max_line_length: 83 | alphanum_fraction: 0.578139 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 8,805 | file_name: __init__.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/clients/__init__.py
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming OAuth 2.0 RFC6749.
"""
from .backend_application import BackendApplicationClient
from .base import AUTH_HEADER, BODY, URI_QUERY, Client
from .legacy_application import LegacyApplicationClient
from .mobile_application import MobileApplicationClient
from .service_application import ServiceApplicationClient
from .web_application import WebApplicationClient

size: 504 | language: Python | extension: .py | total_lines: 13 | avg_line_length: 37.692308 | max_line_length: 57 | alphanum_fraction: 0.806122 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 8,806 | file_name: mobile_application.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/clients/mobile_application.py
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from ..parameters import parse_implicit_response, prepare_grant_uri
from .base import Client
class MobileApplicationClient(Client):
"""A public client utilizing the implicit code grant workflow.
A user-agent-based application is a public client in which the
client code is downloaded from a web server and executes within a
user-agent (e.g. web browser) on the device used by the resource
owner. Protocol data and credentials are easily accessible (and
often visible) to the resource owner. Since such applications
reside within the user-agent, they can make seamless use of the
user-agent capabilities when requesting authorization.
The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
As a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server.
Unlike the authorization code grant type in which the client makes
separate requests for authorization and access token, the client
receives the access token as the result of the authorization request.
The implicit grant type does not include client authentication, and
relies on the presence of the resource owner and the registration of
the redirection URI. Because the access token is encoded into the
redirection URI, it may be exposed to the resource owner and other
applications residing on the same device.
"""
response_type = 'token'
def prepare_request_uri(self, uri, redirect_uri=None, scope=None,
state=None, **kwargs):
"""Prepare the implicit grant request URI.
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
:param redirect_uri: OPTIONAL. The redirect URI must be an absolute URI
and it should have been registered with the OAuth
provider prior to use. As described in `Section 3.1.2`_.
:param scope: OPTIONAL. The scope of the access request as described by
`Section 3.3`_. These may be any string but are commonly
URIs or various categories such as ``videos`` or ``documents``.
:param state: RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
:param kwargs: Extra arguments to include in the request URI.
In addition to supplied parameters, OAuthLib will append the ``client_id``
that was provided in the constructor as well as the mandatory ``response_type``
argument, set to ``token``::
>>> from oauthlib.oauth2 import MobileApplicationClient
>>> client = MobileApplicationClient('your_id')
>>> client.prepare_request_uri('https://example.com')
'https://example.com?client_id=your_id&response_type=token'
>>> client.prepare_request_uri('https://example.com', redirect_uri='https://a.b/callback')
'https://example.com?client_id=your_id&response_type=token&redirect_uri=https%3A%2F%2Fa.b%2Fcallback'
>>> client.prepare_request_uri('https://example.com', scope=['profile', 'pictures'])
'https://example.com?client_id=your_id&response_type=token&scope=profile+pictures'
>>> client.prepare_request_uri('https://example.com', foo='bar')
'https://example.com?client_id=your_id&response_type=token&foo=bar'
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
"""
scope = self.scope if scope is None else scope
return prepare_grant_uri(uri, self.client_id, self.response_type,
redirect_uri=redirect_uri, state=state, scope=scope, **kwargs)
def parse_request_uri_response(self, uri, state=None, scope=None):
"""Parse the response URI fragment.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format:
:param uri: The callback URI that resulted from the user being redirected
back from the provider to you, the client.
:param state: The state provided in the authorization request.
:param scope: The scopes provided in the authorization request.
:return: Dictionary of token parameters.
:raises: OAuth2Error if response is invalid.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
**state**
If you provided the state parameter in the authorization phase, then
the provider is required to include that exact state value in the
response.
While it is not mandated, it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are only required to do
so if it has changed since the authorization request.
A few example responses can be seen below::
>>> response_uri = 'https://example.com/callback#access_token=sdlfkj452&state=ss345asyht&token_type=Bearer&scope=hello+world'
>>> from oauthlib.oauth2 import MobileApplicationClient
>>> client = MobileApplicationClient('your_id')
>>> client.parse_request_uri_response(response_uri)
{
'access_token': 'sdlfkj452',
'token_type': 'Bearer',
'state': 'ss345asyht',
'scope': [u'hello', u'world']
}
>>> client.parse_request_uri_response(response_uri, state='other')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/oauth2/rfc6749/__init__.py", line 598, in parse_request_uri_response
**scope**
File "oauthlib/oauth2/rfc6749/parameters.py", line 197, in parse_implicit_response
raise ValueError("Mismatching or missing state in params.")
ValueError: Mismatching or missing state in params.
>>> def alert_scope_changed(message, old, new):
... print(message, old, new)
...
>>> oauthlib.signals.scope_changed.connect(alert_scope_changed)
>>> client.parse_request_uri_response(response_uri, scope=['other'])
('Scope has changed from "other" to "hello world".', ['other'], ['hello', 'world'])
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
"""
scope = self.scope if scope is None else scope
self.token = parse_implicit_response(uri, state=state, scope=scope)
self.populate_token_attributes(self.token)
return self.token
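
# A minimal sketch of consuming an implicit grant redirect; the fragment
# values below are illustrative.
# >>> client = MobileApplicationClient('your_id')
# >>> token = client.parse_request_uri_response(
# ...     'https://a.b/cb#access_token=t0k3n&token_type=Bearer&state=xyz',
# ...     state='xyz')
# >>> token['access_token']
# 't0k3n'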

size: 8,877 | language: Python | extension: .py | total_lines: 142 | avg_line_length: 51.549296 | max_line_length: 137 | alphanum_fraction: 0.66111 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 8,807 | file_name: base.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/clients/base.py
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming OAuth 2.0 RFC6749.
"""
import time
import warnings
from oauthlib.common import generate_token
from oauthlib.oauth2.rfc6749 import tokens
from oauthlib.oauth2.rfc6749.errors import (
InsecureTransportError, TokenExpiredError,
)
from oauthlib.oauth2.rfc6749.parameters import (
parse_token_response, prepare_token_request,
prepare_token_revocation_request,
)
from oauthlib.oauth2.rfc6749.utils import is_secure_transport
AUTH_HEADER = 'auth_header'
URI_QUERY = 'query'
BODY = 'body'
FORM_ENC_HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded'
}
class Client:
"""Base OAuth2 client responsible for access token management.
This class also acts as a generic interface providing methods common to all
client types such as ``prepare_authorization_request`` and
``prepare_token_revocation_request``. The ``prepare_x_request`` methods are
the recommended way of interacting with clients (as opposed to the abstract
prepare uri/body/etc methods). They are recommended over the older set
because they are easier to use (more consistent) and add a few additional
security checks, such as HTTPS and state checking.
Some of these methods require further implementation only provided by the
specific purpose clients such as
:py:class:`oauthlib.oauth2.MobileApplicationClient` and thus you should always
seek to use the client class matching the OAuth workflow you need. For
Python, this is usually :py:class:`oauthlib.oauth2.WebApplicationClient`.
"""
refresh_token_key = 'refresh_token'
def __init__(self, client_id,
default_token_placement=AUTH_HEADER,
token_type='Bearer',
access_token=None,
refresh_token=None,
mac_key=None,
mac_algorithm=None,
token=None,
scope=None,
state=None,
redirect_url=None,
state_generator=generate_token,
**kwargs):
"""Initialize a client with commonly used attributes.
:param client_id: Client identifier given by the OAuth provider upon
registration.
:param default_token_placement: Tokens can be supplied in the Authorization
header (default), the URL query component (``query``) or the request
body (``body``).
:param token_type: OAuth 2 token type. Defaults to Bearer. Change this
if you specify the ``access_token`` parameter and know it is of a
different token type, such as a MAC, JWT or SAML token. Can
also be supplied as ``token_type`` inside the ``token`` dict parameter.
:param access_token: An access token (string) used to authenticate
requests to protected resources. Can also be supplied inside the
``token`` dict parameter.
:param refresh_token: A refresh token (string) used to refresh expired
tokens. Can also be supplied inside the ``token`` dict parameter.
:param mac_key: Encryption key used with MAC tokens.
:param mac_algorithm: Hashing algorithm for MAC tokens.
:param token: A dict of token attributes such as ``access_token``,
``token_type`` and ``expires_at``.
:param scope: A list of default scopes to request authorization for.
:param state: A CSRF protection string used during authorization.
:param redirect_url: The redirection endpoint on the client side to which
the user returns after authorization.
:param state_generator: A no argument state generation callable. Defaults
to :py:meth:`oauthlib.common.generate_token`.
"""
self.client_id = client_id
self.default_token_placement = default_token_placement
self.token_type = token_type
self.access_token = access_token
self.refresh_token = refresh_token
self.mac_key = mac_key
self.mac_algorithm = mac_algorithm
self.token = token or {}
self.scope = scope
self.state_generator = state_generator
self.state = state
self.redirect_url = redirect_url
self.code = None
self.expires_in = None
self._expires_at = None
self.populate_token_attributes(self.token)
@property
def token_types(self):
"""Supported token types and their respective methods
Additional tokens can be supported by extending this dictionary.
The Bearer token spec is stable and safe to use.
The MAC token spec is not yet stable and support for MAC tokens
is experimental and currently matching version 00 of the spec.
"""
return {
'Bearer': self._add_bearer_token,
'MAC': self._add_mac_token
}
def prepare_request_uri(self, *args, **kwargs):
"""Abstract method used to create request URIs."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def prepare_request_body(self, *args, **kwargs):
"""Abstract method used to create request bodies."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def parse_request_uri_response(self, *args, **kwargs):
"""Abstract method used to parse redirection responses."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def add_token(self, uri, http_method='GET', body=None, headers=None,
token_placement=None, **kwargs):
"""Add token to the request uri, body or authorization header.
The access token type provides the client with the information
required to successfully utilize the access token to make a protected
resource request (along with type-specific attributes). The client
MUST NOT use an access token if it does not understand the token
type.
For example, the "bearer" token type defined in
[`I-D.ietf-oauth-v2-bearer`_] is utilized by simply including the access
token string in the request:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: Bearer mF_9.B5f-4.1JqM
while the "mac" token type defined in [`I-D.ietf-oauth-v2-http-mac`_] is
utilized by issuing a MAC key together with the access token which is
used to sign certain components of the HTTP requests:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: MAC id="h480djs93hd8",
nonce="274312:dj83hs9s",
mac="kDZvddkndxvhGRXZhvuDjEWhGeE="
.. _`I-D.ietf-oauth-v2-bearer`: https://tools.ietf.org/html/rfc6749#section-12.2
.. _`I-D.ietf-oauth-v2-http-mac`: https://tools.ietf.org/html/rfc6749#section-12.2
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
token_placement = token_placement or self.default_token_placement
case_insensitive_token_types = {
k.lower(): v for k, v in self.token_types.items()}
if self.token_type.lower() not in case_insensitive_token_types:
raise ValueError("Unsupported token type: %s" % self.token_type)
if not (self.access_token or self.token.get('access_token')):
raise ValueError("Missing access token.")
if self._expires_at and self._expires_at < time.time():
raise TokenExpiredError()
return case_insensitive_token_types[self.token_type.lower()](uri, http_method, body,
headers, token_placement, **kwargs)
def prepare_authorization_request(self, authorization_url, state=None,
redirect_url=None, scope=None, **kwargs):
"""Prepare the authorization request.
This is the first step in many OAuth flows in which the user is
redirected to a certain authorization URL. This method adds
required parameters to the authorization URL.
:param authorization_url: Provider authorization endpoint URL.
:param state: CSRF protection string. Will be automatically created if
not provided. The generated state is available via the ``state``
attribute. Clients should verify that the state is unchanged and
present in the authorization response. This verification is done
automatically if using the ``authorization_response`` parameter
with ``prepare_token_request``.
:param redirect_url: Redirect URL to which the user will be returned
after authorization. Must be provided unless previously set up with
the provider. If provided then it must also be provided in the
token request.
:param scope: List of scopes to request. Must be equal to
or a subset of the scopes granted when obtaining the refresh
token. If none is provided, the ones provided in the constructor are
used.
:param kwargs: Additional parameters to include in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(authorization_url):
raise InsecureTransportError()
self.state = state or self.state_generator()
self.redirect_url = redirect_url or self.redirect_url
# do not assign scope to self automatically anymore
scope = self.scope if scope is None else scope
auth_url = self.prepare_request_uri(
authorization_url, redirect_uri=self.redirect_url,
scope=scope, state=self.state, **kwargs)
return auth_url, FORM_ENC_HEADERS, ''
def prepare_token_request(self, token_url, authorization_response=None,
redirect_url=None, state=None, body='', **kwargs):
"""Prepare a token creation request.
Note that these requests usually require client authentication, either
by including client_id or a set of provider specific authentication
credentials.
:param token_url: Provider token creation endpoint URL.
:param authorization_response: The full redirection URL string, i.e.
the location to which the user was redirected after successful
authorization. Used to mine credentials needed to obtain a token
in this step, such as authorization code.
:param redirect_url: The redirect_url supplied with the authorization
request (if there was one).
:param state:
:param body: Existing request body (URL encoded string) to embed parameters
into. This may contain extra parameters. Default ''.
:param kwargs: Additional parameters to include in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
state = state or self.state
if authorization_response:
self.parse_request_uri_response(
authorization_response, state=state)
self.redirect_url = redirect_url or self.redirect_url
body = self.prepare_request_body(body=body,
redirect_uri=self.redirect_url, **kwargs)
return token_url, FORM_ENC_HEADERS, body
def prepare_refresh_token_request(self, token_url, refresh_token=None,
body='', scope=None, **kwargs):
"""Prepare an access token refresh request.
Expired access tokens can be replaced by new access tokens without
going through the OAuth dance if the client obtained a refresh token.
This refresh token and authentication credentials can be used to
obtain a new access token, and possibly a new refresh token.
:param token_url: Provider token refresh endpoint URL.
:param refresh_token: Refresh token string.
:param body: Existing request body (URL encoded string) to embed parameters
into. This may contain extra parameters. Default ''.
:param scope: List of scopes to request. Must be equal to
or a subset of the scopes granted when obtaining the refresh
token. If none is provided, the ones provided in the constructor are
used.
:param kwargs: Additional parameters to include in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
# do not assign scope to self automatically anymore
scope = self.scope if scope is None else scope
body = self.prepare_refresh_body(body=body,
refresh_token=refresh_token, scope=scope, **kwargs)
return token_url, FORM_ENC_HEADERS, body
def prepare_token_revocation_request(self, revocation_url, token,
token_type_hint="access_token", body='', callback=None, **kwargs):
"""Prepare a token revocation request.
:param revocation_url: Provider token revocation endpoint URL.
:param token: The access or refresh token to be revoked (string).
:param token_type_hint: ``"access_token"`` (default) or
``"refresh_token"``. This is optional and if you wish to not pass it you
must provide ``token_type_hint=None``.
:param body:
:param callback: A jsonp callback such as ``package.callback`` to be invoked
upon receiving the response. Note that it should not include a () suffix.
:param kwargs: Additional parameters to include in the request.
:returns: The prepared request tuple with (url, headers, body).
Note that JSONP request may use GET requests as the parameters will
be added to the request URL query as opposed to the request body.
An example of a revocation request
.. code-block:: http
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
An example of a jsonp revocation request
.. code-block:: http
GET /revoke?token=agabcdefddddafdd&callback=package.myCallback HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
and an error response
.. code-block:: http
package.myCallback({"error":"unsupported_token_type"});
Note that these requests usually require client credentials, client_id in
the case of public clients and provider-specific authentication
credentials for confidential clients.
"""
if not is_secure_transport(revocation_url):
raise InsecureTransportError()
return prepare_token_revocation_request(revocation_url, token,
token_type_hint=token_type_hint, body=body, callback=callback,
**kwargs)
def parse_request_body_response(self, body, scope=None, **kwargs):
"""Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
:param body: The response body from the token request.
:param scope: Scopes originally requested. If none is provided, the ones
provided in the constructor are used.
:return: Dictionary of token parameters.
:raises: Warning if scope has changed. OAuth2Error if response is invalid.
These responses are JSON encoded and could easily be parsed without
the assistance of OAuthLib. However, there are a few subtle issues
to be aware of regarding the response which are helpfully addressed
through the raising of various errors.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
While it is not mandated, it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are only required to do
so if it has changed since the authorization request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
"""
scope = self.scope if scope is None else scope
self.token = parse_token_response(body, scope=scope)
self.populate_token_attributes(self.token)
return self.token
def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
"""Prepare an access token request, using a refresh token.
If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner. Note that if none is provided, the ones provided
in the constructor are used if any.
"""
refresh_token = refresh_token or self.refresh_token
scope = self.scope if scope is None else scope
return prepare_token_request(self.refresh_token_key, body=body, scope=scope,
refresh_token=refresh_token, **kwargs)
def _add_bearer_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=None):
"""Add a bearer token to the request uri, body or authorization header."""
if token_placement == AUTH_HEADER:
headers = tokens.prepare_bearer_headers(self.access_token, headers)
elif token_placement == URI_QUERY:
uri = tokens.prepare_bearer_uri(self.access_token, uri)
elif token_placement == BODY:
body = tokens.prepare_bearer_body(self.access_token, body)
else:
raise ValueError("Invalid token placement.")
return uri, headers, body
def _add_mac_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=AUTH_HEADER, ext=None, **kwargs):
"""Add a MAC token to the request authorization header.
Warning: MAC token support is experimental as the spec is not yet stable.
"""
if token_placement != AUTH_HEADER:
raise ValueError("Invalid token placement.")
headers = tokens.prepare_mac_header(self.access_token, uri,
self.mac_key, http_method, headers=headers, body=body, ext=ext,
hash_algorithm=self.mac_algorithm, **kwargs)
return uri, headers, body
def _populate_attributes(self, response):
warnings.warn("Please switch to the public method "
"populate_token_attributes.", DeprecationWarning)
return self.populate_token_attributes(response)
def populate_code_attributes(self, response):
"""Add attributes from an auth code response to self."""
if 'code' in response:
self.code = response.get('code')
def populate_token_attributes(self, response):
"""Add attributes from a token exchange response to self."""
if 'access_token' in response:
self.access_token = response.get('access_token')
if 'refresh_token' in response:
self.refresh_token = response.get('refresh_token')
if 'token_type' in response:
self.token_type = response.get('token_type')
if 'expires_in' in response:
self.expires_in = response.get('expires_in')
self._expires_at = time.time() + int(self.expires_in)
if 'expires_at' in response:
self._expires_at = int(response.get('expires_at'))
if 'mac_key' in response:
self.mac_key = response.get('mac_key')
if 'mac_algorithm' in response:
self.mac_algorithm = response.get('mac_algorithm')
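
# A minimal sketch of bearer token placement with the base Client; the
# token value is illustrative. add_token() rejects plain-http URIs unless
# OAUTHLIB_INSECURE_TRANSPORT is set.
# >>> client = Client('your_id', access_token='mF_9.B5f-4.1JqM')
# >>> uri, headers, body = client.add_token('https://api.example/resource/1')
# >>> headers['Authorization']
# 'Bearer mF_9.B5f-4.1JqM'
# >>> client.populate_token_attributes({'access_token': 'new', 'expires_in': 3600})
# >>> client.access_token
# 'new'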

size: 22,046 | language: Python | extension: .py | total_lines: 396 | avg_line_length: 44.744949 | max_line_length: 110 | alphanum_fraction: 0.656384 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 8,808 | file_name: legacy_application.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/clients/legacy_application.py
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from ..parameters import prepare_token_request
from .base import Client
class LegacyApplicationClient(Client):
"""A public client using the resource owner password and username directly.
The resource owner password credentials grant type is suitable in
cases where the resource owner has a trust relationship with the
client, such as the device operating system or a highly privileged
application. The authorization server should take special care when
enabling this grant type, and only allow it when other flows are not
viable.
The grant type is suitable for clients capable of obtaining the
resource owner's credentials (username and password, typically using
an interactive form). It is also used to migrate existing clients
using direct authentication schemes such as HTTP Basic or Digest
authentication to OAuth by converting the stored credentials to an
access token.
The method through which the client obtains the resource owner
credentials is beyond the scope of this specification. The client
MUST discard the credentials once an access token has been obtained.
"""
grant_type = 'password'
def __init__(self, client_id, **kwargs):
super().__init__(client_id, **kwargs)
def prepare_request_body(self, username, password, body='', scope=None,
include_client_id=False, **kwargs):
"""Add the resource owner password and username to the request body.
The client makes a request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format per `Appendix B`_ in the HTTP request entity-body:
:param username: The resource owner username.
:param password: The resource owner password.
:param body: Existing request body (URL encoded string) to embed parameters
into. This may contain extra parameters. Default ''.
:param scope: The scope of the access request as described by
`Section 3.3`_.
:param include_client_id: `True` to send the `client_id` in the
body of the upstream request. This is required
if the client is not authenticating with the
authorization server as described in
`Section 3.2.1`_. False otherwise (default).
:type include_client_id: Boolean
:param kwargs: Extra credentials to include in the token request.
If the client type is confidential or the client was issued client
credentials (or assigned other authentication requirements), the
client MUST authenticate with the authorization server as described
in `Section 3.2.1`_.
The prepared body will include all provided credentials as well as
the ``grant_type`` parameter set to ``password``::
>>> from oauthlib.oauth2 import LegacyApplicationClient
>>> client = LegacyApplicationClient('your_id')
>>> client.prepare_request_body(username='foo', password='bar', scope=['hello', 'world'])
'grant_type=password&username=foo&scope=hello+world&password=bar'
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
"""
kwargs['client_id'] = self.client_id
kwargs['include_client_id'] = include_client_id
scope = self.scope if scope is None else scope
return prepare_token_request(self.grant_type, body=body, username=username,
password=password, scope=scope, **kwargs)
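
# A minimal sketch of the password flow via the generic
# prepare_token_request() helper from the base class; the endpoint URL
# and credentials are illustrative assumptions.
# >>> client = LegacyApplicationClient('your_id')
# >>> url, headers, body = client.prepare_token_request(
# ...     'https://provider.example/token', username='foo', password='bar')
# >>> 'grant_type=password' in body
# True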

size: 4,031 | language: Python | extension: .py | total_lines: 68 | avg_line_length: 49.470588 | max_line_length: 101 | alphanum_fraction: 0.671903 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 8,809 | file_name: revocation.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/endpoints/revocation.py
"""
oauthlib.oauth2.rfc6749.endpoint.revocation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the OAuth 2 `Token Revocation`_ spec (draft 11).
.. _`Token Revocation`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11
"""
import logging
from oauthlib.common import Request
from ..errors import OAuth2Error
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class RevocationEndpoint(BaseEndpoint):
"""Token revocation endpoint.
Endpoint used by authenticated clients to revoke access and refresh tokens.
Commonly this will be part of the Authorization Endpoint.
"""
valid_token_types = ('access_token', 'refresh_token')
valid_request_methods = ('POST',)
def __init__(self, request_validator, supported_token_types=None,
enable_jsonp=False):
BaseEndpoint.__init__(self)
self.request_validator = request_validator
self.supported_token_types = (
supported_token_types or self.valid_token_types)
self.enable_jsonp = enable_jsonp
@catch_errors_and_unavailability
def create_revocation_response(self, uri, http_method='POST', body=None,
headers=None):
"""Revoke supplied access or refresh token.
The authorization server responds with HTTP status code 200 if the
token has been revoked successfully or if the client submitted an
invalid token.
Note: invalid tokens do not cause an error response since the client
cannot handle such an error in a reasonable way. Moreover, the purpose
of the revocation request, invalidating the particular token, is
already achieved.
The content of the response body is ignored by the client as all
necessary information is conveyed in the response code.
An invalid token type hint value is ignored by the authorization server
and does not influence the revocation response.
"""
resp_headers = {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
}
request = Request(
uri, http_method=http_method, body=body, headers=headers)
try:
self.validate_revocation_request(request)
log.debug('Token revocation valid for %r.', request)
except OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
response_body = e.json
if self.enable_jsonp and request.callback:
response_body = '{}({});'.format(request.callback, response_body)
resp_headers.update(e.headers)
return resp_headers, response_body, e.status_code
self.request_validator.revoke_token(request.token,
request.token_type_hint, request)
response_body = ''
if self.enable_jsonp and request.callback:
response_body = request.callback + '();'
return {}, response_body, 200
def validate_revocation_request(self, request):
"""Ensure the request is valid.
The client constructs the request by including the following parameters
using the "application/x-www-form-urlencoded" format in the HTTP
request entity-body:
token (REQUIRED). The token that the client wants to get revoked.
token_type_hint (OPTIONAL). A hint about the type of the token
submitted for revocation. Clients MAY pass this parameter in order to
help the authorization server to optimize the token lookup. If the
server is unable to locate the token using the given hint, it MUST
extend its search across all of its supported token types. An
authorization server MAY ignore this parameter, particularly if it is
able to detect the token type automatically. This specification
defines two such values:
* access_token: An Access Token as defined in [RFC6749],
`section 1.4`_
* refresh_token: A Refresh Token as defined in [RFC6749],
`section 1.5`_
Specific implementations, profiles, and extensions of this
specification MAY define other values for this parameter using
the registry defined in `Section 4.1.2`_.
The client also includes its authentication credentials as described in
`Section 2.3`_ of [`RFC6749`_].
.. _`section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4
.. _`section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5
.. _`section 2.3`: https://tools.ietf.org/html/rfc6749#section-2.3
.. _`Section 4.1.2`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2
.. _`RFC6749`: https://tools.ietf.org/html/rfc6749
"""
self._raise_on_bad_method(request)
self._raise_on_bad_post_request(request)
self._raise_on_missing_token(request)
self._raise_on_invalid_client(request)
self._raise_on_unsupported_token(request)
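
# A minimal wiring sketch; `MyValidator` is a hypothetical
# oauthlib.oauth2.RequestValidator subclass implementing revoke_token()
# and client authentication.
# >>> endpoint = RevocationEndpoint(MyValidator())
# >>> headers, body, status = endpoint.create_revocation_response(
# ...     'https://server.example/revoke', http_method='POST',
# ...     body='token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token',
# ...     headers={'Content-Type': 'application/x-www-form-urlencoded'})
# >>> status  # 200 whenever the request validates, even for unknown tokens
# 200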

size: 5,212 | language: Python | extension: .py | total_lines: 98 | avg_line_length: 43.336735 | max_line_length: 101 | alphanum_fraction: 0.654542 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)

id: 8,810 | file_name: pre_configured.py | file_path: rembo10_headphones/lib/oauthlib/oauth2/rfc6749/endpoints/pre_configured.py
"""
oauthlib.oauth2.rfc6749.endpoints.pre_configured
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various endpoints needed
for providing OAuth 2.0 RFC6749 servers.
"""
from ..grant_types import (
AuthorizationCodeGrant, ClientCredentialsGrant, ImplicitGrant,
RefreshTokenGrant, ResourceOwnerPasswordCredentialsGrant,
)
from ..tokens import BearerToken
from .authorization import AuthorizationEndpoint
from .introspect import IntrospectEndpoint
from .resource import ResourceEndpoint
from .revocation import RevocationEndpoint
from .token import TokenEndpoint
class Server(AuthorizationEndpoint, IntrospectEndpoint, TokenEndpoint,
ResourceEndpoint, RevocationEndpoint):
"""An all-in-one endpoint featuring all four major grant types."""
def __init__(self, request_validator, token_expires_in=None,
token_generator=None, refresh_token_generator=None,
*args, **kwargs):
"""Construct a new all-grants-in-one server.
:param request_validator: An implementation of
oauthlib.oauth2.RequestValidator.
:param token_expires_in: An int or a function to generate a token
expiration offset (in seconds) given a
oauthlib.common.Request object.
:param token_generator: A function to generate a token from a request.
:param refresh_token_generator: A function to generate a token from a
request for the refresh token.
:param kwargs: Extra parameters to pass to authorization-,
token-, resource-, and revocation-endpoint constructors.
"""
self.auth_grant = AuthorizationCodeGrant(request_validator)
self.implicit_grant = ImplicitGrant(request_validator)
self.password_grant = ResourceOwnerPasswordCredentialsGrant(
request_validator)
self.credentials_grant = ClientCredentialsGrant(request_validator)
self.refresh_grant = RefreshTokenGrant(request_validator)
self.bearer = BearerToken(request_validator, token_generator,
token_expires_in, refresh_token_generator)
AuthorizationEndpoint.__init__(self, default_response_type='code',
response_types={
'code': self.auth_grant,
'token': self.implicit_grant,
'none': self.auth_grant
},
default_token_type=self.bearer)
TokenEndpoint.__init__(self, default_grant_type='authorization_code',
grant_types={
'authorization_code': self.auth_grant,
'password': self.password_grant,
'client_credentials': self.credentials_grant,
'refresh_token': self.refresh_grant,
},
default_token_type=self.bearer)
ResourceEndpoint.__init__(self, default_token='Bearer',
token_types={'Bearer': self.bearer})
RevocationEndpoint.__init__(self, request_validator)
IntrospectEndpoint.__init__(self, request_validator)
class WebApplicationServer(AuthorizationEndpoint, IntrospectEndpoint, TokenEndpoint,
ResourceEndpoint, RevocationEndpoint):
"""An all-in-one endpoint featuring Authorization code grant and Bearer tokens."""
def __init__(self, request_validator, token_generator=None,
token_expires_in=None, refresh_token_generator=None, **kwargs):
"""Construct a new web application server.
:param request_validator: An implementation of
oauthlib.oauth2.RequestValidator.
:param token_expires_in: An int or a function to generate a token
expiration offset (in seconds) given a
oauthlib.common.Request object.
:param token_generator: A function to generate a token from a request.
:param refresh_token_generator: A function to generate a token from a
request for the refresh token.
:param kwargs: Extra parameters to pass to authorization-,
token-, resource-, and revocation-endpoint constructors.
"""
self.auth_grant = AuthorizationCodeGrant(request_validator)
self.refresh_grant = RefreshTokenGrant(request_validator)
self.bearer = BearerToken(request_validator, token_generator,
token_expires_in, refresh_token_generator)
AuthorizationEndpoint.__init__(self, default_response_type='code',
response_types={'code': self.auth_grant},
default_token_type=self.bearer)
TokenEndpoint.__init__(self, default_grant_type='authorization_code',
grant_types={
'authorization_code': self.auth_grant,
'refresh_token': self.refresh_grant,
},
default_token_type=self.bearer)
ResourceEndpoint.__init__(self, default_token='Bearer',
token_types={'Bearer': self.bearer})
RevocationEndpoint.__init__(self, request_validator)
IntrospectEndpoint.__init__(self, request_validator)
class MobileApplicationServer(AuthorizationEndpoint, IntrospectEndpoint,
ResourceEndpoint, RevocationEndpoint):
"""An all-in-one endpoint featuring Implicit code grant and Bearer tokens."""
def __init__(self, request_validator, token_generator=None,
token_expires_in=None, refresh_token_generator=None, **kwargs):
"""Construct a new implicit grant server.
:param request_validator: An implementation of
oauthlib.oauth2.RequestValidator.
:param token_expires_in: An int or a function to generate a token
expiration offset (in seconds) given a
oauthlib.common.Request object.
:param token_generator: A function to generate a token from a request.
:param refresh_token_generator: A function to generate a token from a
request for the refresh token.
:param kwargs: Extra parameters to pass to authorization-,
token-, resource-, and revocation-endpoint constructors.
"""
self.implicit_grant = ImplicitGrant(request_validator)
self.bearer = BearerToken(request_validator, token_generator,
token_expires_in, refresh_token_generator)
AuthorizationEndpoint.__init__(self, default_response_type='token',
response_types={
'token': self.implicit_grant},
default_token_type=self.bearer)
ResourceEndpoint.__init__(self, default_token='Bearer',
token_types={'Bearer': self.bearer})
RevocationEndpoint.__init__(self, request_validator,
supported_token_types=['access_token'])
IntrospectEndpoint.__init__(self, request_validator,
supported_token_types=['access_token'])
class LegacyApplicationServer(TokenEndpoint, IntrospectEndpoint,
ResourceEndpoint, RevocationEndpoint):
"""An all-in-one endpoint featuring Resource Owner Password Credentials grant and Bearer tokens."""
def __init__(self, request_validator, token_generator=None,
token_expires_in=None, refresh_token_generator=None, **kwargs):
"""Construct a resource owner password credentials grant server.
:param request_validator: An implementation of
oauthlib.oauth2.RequestValidator.
:param token_expires_in: An int or a function to generate a token
expiration offset (in seconds) given a
oauthlib.common.Request object.
:param token_generator: A function to generate a token from a request.
:param refresh_token_generator: A function to generate a token from a
request for the refresh token.
:param kwargs: Extra parameters to pass to authorization-,
token-, resource-, and revocation-endpoint constructors.
"""
self.password_grant = ResourceOwnerPasswordCredentialsGrant(
request_validator)
self.refresh_grant = RefreshTokenGrant(request_validator)
self.bearer = BearerToken(request_validator, token_generator,
token_expires_in, refresh_token_generator)
TokenEndpoint.__init__(self, default_grant_type='password',
grant_types={
'password': self.password_grant,
'refresh_token': self.refresh_grant,
},
default_token_type=self.bearer)
ResourceEndpoint.__init__(self, default_token='Bearer',
token_types={'Bearer': self.bearer})
RevocationEndpoint.__init__(self, request_validator)
IntrospectEndpoint.__init__(self, request_validator)
class BackendApplicationServer(TokenEndpoint, IntrospectEndpoint,
ResourceEndpoint, RevocationEndpoint):
"""An all-in-one endpoint featuring Client Credentials grant and Bearer tokens."""
def __init__(self, request_validator, token_generator=None,
token_expires_in=None, refresh_token_generator=None, **kwargs):
"""Construct a client credentials grant server.
:param request_validator: An implementation of
oauthlib.oauth2.RequestValidator.
:param token_expires_in: An int or a function to generate a token
expiration offset (in seconds) given a
oauthlib.common.Request object.
:param token_generator: A function to generate a token from a request.
:param refresh_token_generator: A function to generate a token from a
request for the refresh token.
:param kwargs: Extra parameters to pass to authorization-,
token-, resource-, and revocation-endpoint constructors.
"""
self.credentials_grant = ClientCredentialsGrant(request_validator)
self.bearer = BearerToken(request_validator, token_generator,
token_expires_in, refresh_token_generator)
TokenEndpoint.__init__(self, default_grant_type='client_credentials',
grant_types={
'client_credentials': self.credentials_grant},
default_token_type=self.bearer)
ResourceEndpoint.__init__(self, default_token='Bearer',
token_types={'Bearer': self.bearer})
RevocationEndpoint.__init__(self, request_validator,
supported_token_types=['access_token'])
IntrospectEndpoint.__init__(self, request_validator,
supported_token_types=['access_token'])
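# ----------------------------------------------------------------------------
# Illustrative usage sketch (appended; not part of the original module). A
# pre-configured server only needs a validator; grants, bearer tokens and the
# individual endpoints are wired together by the constructors above.
# ``_DemoValidator`` is a hypothetical stub covering just the hooks used by
# the client credentials flow.
if __name__ == '__main__':
    from oauthlib.oauth2 import RequestValidator

    class _DemoValidator(RequestValidator):
        def authenticate_client(self, request, *args, **kwargs):
            request.client = type('Client', (), {'client_id': 'demo'})()
            return True

        def validate_grant_type(self, client_id, grant_type, client,
                                request, *args, **kwargs):
            return True

        def validate_scopes(self, client_id, scopes, client,
                            request, *args, **kwargs):
            return True

        def save_bearer_token(self, token, request, *args, **kwargs):
            pass  # a real validator would persist the token here

    server = BackendApplicationServer(_DemoValidator())
    headers, body, status = server.create_token_response(
        'https://example.com/token', http_method='POST',
        body='grant_type=client_credentials&scope=profile')
    print(status, body)  # 200 and a JSON bearer token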
| 11,954
|
Python
|
.py
| 187
| 45.315508
| 103
| 0.585875
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,811
|
token.py
|
rembo10_headphones/lib/oauthlib/oauth2/rfc6749/endpoints/token.py
|
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
import logging
from oauthlib.common import Request
from oauthlib.oauth2.rfc6749 import utils
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class TokenEndpoint(BaseEndpoint):
"""Token issuing endpoint.
The token endpoint is used by the client to obtain an access token by
presenting its authorization grant or refresh token. The token
endpoint is used with every authorization grant except for the
implicit grant type (since an access token is issued directly).
The means through which the client obtains the location of the token
endpoint are beyond the scope of this specification, but the location
is typically provided in the service documentation.
The endpoint URI MAY include an "application/x-www-form-urlencoded"
formatted (per `Appendix B`_) query component,
which MUST be retained when adding additional query parameters. The
endpoint URI MUST NOT include a fragment component::
https://example.com/path?query=component # OK
https://example.com/path?query=component#fragment # Not OK
Since requests to the token endpoint result in the transmission of
clear-text credentials (in the HTTP request and response), the
authorization server MUST require the use of TLS as described in
Section 1.6 when sending requests to the token endpoint::
# We will deny any request whose URI scheme is not https
The client MUST use the HTTP "POST" method when making access token
requests::
# HTTP method is currently not enforced
Parameters sent without a value MUST be treated as if they were
omitted from the request. The authorization server MUST ignore
unrecognized request parameters. Request and response parameters
MUST NOT be included more than once::
# Delegated to each grant type.
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
"""
valid_request_methods = ('POST',)
def __init__(self, default_grant_type, default_token_type, grant_types):
BaseEndpoint.__init__(self)
self._grant_types = grant_types
self._default_token_type = default_token_type
self._default_grant_type = default_grant_type
@property
def grant_types(self):
return self._grant_types
@property
def default_grant_type(self):
return self._default_grant_type
@property
def default_grant_type_handler(self):
return self.grant_types.get(self.default_grant_type)
@property
def default_token_type(self):
return self._default_token_type
@catch_errors_and_unavailability
def create_token_response(self, uri, http_method='POST', body=None,
headers=None, credentials=None, grant_type_for_scope=None,
claims=None):
"""Extract grant_type and route to the designated handler."""
request = Request(
uri, http_method=http_method, body=body, headers=headers)
self.validate_token_request(request)
# 'scope' is an allowed Token Request param in both the "Resource Owner Password Credentials Grant"
# and "Client Credentials Grant" flows
# https://tools.ietf.org/html/rfc6749#section-4.3.2
# https://tools.ietf.org/html/rfc6749#section-4.4.2
request.scopes = utils.scope_to_list(request.scope)
request.extra_credentials = credentials
if grant_type_for_scope:
request.grant_type = grant_type_for_scope
# OpenID Connect claims, if provided. The server using oauthlib might choose
# to implement the claims parameter of the Authorization Request. In this case
# it should retrieve those claims and pass them via the claims argument here,
# as a dict.
if claims:
request.claims = claims
grant_type_handler = self.grant_types.get(request.grant_type,
self.default_grant_type_handler)
log.debug('Dispatching grant_type %s request to %r.',
request.grant_type, grant_type_handler)
return grant_type_handler.create_token_response(
request, self.default_token_type)
def validate_token_request(self, request):
self._raise_on_bad_method(request)
self._raise_on_bad_post_request(request)
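# ----------------------------------------------------------------------------
# Illustrative dispatch sketch (appended; not part of the original module).
# The grant_type field of the request body selects a handler from the
# ``grant_types`` map; unknown values fall back to the default handler.
# ``_DemoValidator`` is a hypothetical stub for the client credentials flow.
if __name__ == '__main__':
    from oauthlib.oauth2 import (
        BearerToken, ClientCredentialsGrant, RequestValidator,
    )

    class _DemoValidator(RequestValidator):
        def authenticate_client(self, request, *args, **kwargs):
            request.client = type('Client', (), {'client_id': 'demo'})()
            return True

        def validate_grant_type(self, client_id, grant_type, client,
                                request, *args, **kwargs):
            return True

        def validate_scopes(self, client_id, scopes, client,
                            request, *args, **kwargs):
            return True

        def save_bearer_token(self, token, request, *args, **kwargs):
            pass

    validator = _DemoValidator()
    endpoint = TokenEndpoint(
        default_grant_type='client_credentials',
        default_token_type=BearerToken(validator),
        grant_types={'client_credentials': ClientCredentialsGrant(validator)})
    _, body, status = endpoint.create_token_response(
        'https://example.com/token', http_method='POST',
        body='grant_type=client_credentials&scope=profile')
    print(status, body)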
| 4,595
|
Python
|
.py
| 90
| 43.2
| 107
| 0.695264
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,812
|
authorization.py
|
rembo10_headphones/lib/oauthlib/oauth2/rfc6749/endpoints/authorization.py
|
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
import logging
from oauthlib.common import Request
from oauthlib.oauth2.rfc6749 import utils
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class AuthorizationEndpoint(BaseEndpoint):
"""Authorization endpoint - used by the client to obtain authorization
from the resource owner via user-agent redirection.
The authorization endpoint is used to interact with the resource
owner and obtain an authorization grant. The authorization server
MUST first verify the identity of the resource owner. The way in
which the authorization server authenticates the resource owner (e.g.
username and password login, session cookies) is beyond the scope of
this specification.
The endpoint URI MAY include an "application/x-www-form-urlencoded"
formatted (per `Appendix B`_) query component,
which MUST be retained when adding additional query parameters. The
endpoint URI MUST NOT include a fragment component::
https://example.com/path?query=component # OK
https://example.com/path?query=component#fragment # Not OK
Since requests to the authorization endpoint result in user
authentication and the transmission of clear-text credentials (in the
HTTP response), the authorization server MUST require the use of TLS
as described in Section 1.6 when sending requests to the
authorization endpoint::
# We will deny any request whose URI scheme is not https
The authorization server MUST support the use of the HTTP "GET"
method [RFC2616] for the authorization endpoint, and MAY support the
use of the "POST" method as well::
# HTTP method is currently not enforced
Parameters sent without a value MUST be treated as if they were
omitted from the request. The authorization server MUST ignore
unrecognized request parameters. Request and response parameters
MUST NOT be included more than once::
# Enforced through the design of oauthlib.common.Request
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
"""
def __init__(self, default_response_type, default_token_type,
response_types):
BaseEndpoint.__init__(self)
self._response_types = response_types
self._default_response_type = default_response_type
self._default_token_type = default_token_type
@property
def response_types(self):
return self._response_types
@property
def default_response_type(self):
return self._default_response_type
@property
def default_response_type_handler(self):
return self.response_types.get(self.default_response_type)
@property
def default_token_type(self):
return self._default_token_type
@catch_errors_and_unavailability
def create_authorization_response(self, uri, http_method='GET', body=None,
headers=None, scopes=None, credentials=None):
"""Extract response_type and route to the designated handler."""
request = Request(
uri, http_method=http_method, body=body, headers=headers)
request.scopes = scopes
# TODO: decide whether this should be a required argument
request.user = None # TODO: explain this in docs
for k, v in (credentials or {}).items():
setattr(request, k, v)
response_type_handler = self.response_types.get(
request.response_type, self.default_response_type_handler)
log.debug('Dispatching response_type %s request to %r.',
request.response_type, response_type_handler)
return response_type_handler.create_authorization_response(
request, self.default_token_type)
@catch_errors_and_unavailability
def validate_authorization_request(self, uri, http_method='GET', body=None,
headers=None):
"""Extract response_type and route to the designated handler."""
request = Request(
uri, http_method=http_method, body=body, headers=headers)
request.scopes = utils.scope_to_list(request.scope)
response_type_handler = self.response_types.get(
request.response_type, self.default_response_type_handler)
return response_type_handler.validate_authorization_request(request)
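# ----------------------------------------------------------------------------
# Illustrative validation sketch (appended; not part of the original module).
# It pre-validates an implicit grant authorization request. ``_DemoValidator``
# is a hypothetical stub covering only the hooks that validation touches.
if __name__ == '__main__':
    from oauthlib.oauth2 import BearerToken, ImplicitGrant, RequestValidator

    class _DemoValidator(RequestValidator):
        def validate_client_id(self, client_id, request, *args, **kwargs):
            return True

        def validate_redirect_uri(self, client_id, redirect_uri,
                                  request, *args, **kwargs):
            return True

        def validate_response_type(self, client_id, response_type, client,
                                   request, *args, **kwargs):
            return True

        def validate_scopes(self, client_id, scopes, client,
                            request, *args, **kwargs):
            return True

    validator = _DemoValidator()
    endpoint = AuthorizationEndpoint(
        default_response_type='token',
        default_token_type=BearerToken(validator),
        response_types={'token': ImplicitGrant(validator)})
    scopes, info = endpoint.validate_authorization_request(
        'https://example.com/authorize?client_id=demo&response_type=token'
        '&redirect_uri=https%3A%2F%2Fclient.example%2Fcb&scope=profile')
    print(scopes)  # ['profile']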
| 4,584
|
Python
|
.py
| 88
| 44.477273
| 83
| 0.706264
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,813
|
metadata.py
|
rembo10_headphones/lib/oauthlib/oauth2/rfc6749/endpoints/metadata.py
|
"""
oauthlib.oauth2.rfc6749.endpoints.metadata
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the `OAuth 2.0 Authorization Server Metadata`_.
.. _`OAuth 2.0 Authorization Server Metadata`: https://tools.ietf.org/html/rfc8414
"""
import copy
import json
import logging
from .. import grant_types
from .authorization import AuthorizationEndpoint
from .base import BaseEndpoint, catch_errors_and_unavailability
from .introspect import IntrospectEndpoint
from .revocation import RevocationEndpoint
from .token import TokenEndpoint
log = logging.getLogger(__name__)
class MetadataEndpoint(BaseEndpoint):
"""OAuth2.0 Authorization Server Metadata endpoint.
This specification generalizes the metadata format defined by
`OpenID Connect Discovery 1.0`_ in a way that is compatible
with OpenID Connect Discovery while being applicable to a wider set
of OAuth 2.0 use cases. This is intentionally parallel to the way
that OAuth 2.0 Dynamic Client Registration Protocol [`RFC7591`_]
generalized the dynamic client registration mechanisms defined by
OpenID Connect Dynamic Client Registration 1.0
in a way that is compatible with it.
.. _`OpenID Connect Discovery 1.0`: https://openid.net/specs/openid-connect-discovery-1_0.html
.. _`RFC7591`: https://tools.ietf.org/html/rfc7591
"""
def __init__(self, endpoints, claims={}, raise_errors=True):
assert isinstance(claims, dict)
for endpoint in endpoints:
assert isinstance(endpoint, BaseEndpoint)
BaseEndpoint.__init__(self)
self.raise_errors = raise_errors
self.endpoints = endpoints
self.initial_claims = claims
self.claims = self.validate_metadata_server()
@catch_errors_and_unavailability
def create_metadata_response(self, uri, http_method='GET', body=None,
headers=None):
"""Create metadata response
"""
headers = {
'Content-Type': 'application/json'
}
return headers, json.dumps(self.claims), 200
def validate_metadata(self, array, key, is_required=False, is_list=False, is_url=False, is_issuer=False):
if not self.raise_errors:
return
if key not in array:
if is_required:
raise ValueError("key {} is a mandatory metadata.".format(key))
elif is_issuer:
if not array[key].startswith("https"):
raise ValueError("key {}: {} must be an HTTPS URL".format(key, array[key]))
if "?" in array[key] or "&" in array[key] or "#" in array[key]:
raise ValueError("key {}: {} must not contain query or fragment components".format(key, array[key]))
elif is_url:
if not array[key].startswith("http"):
raise ValueError("key {}: {} must be an URL".format(key, array[key]))
elif is_list:
if not isinstance(array[key], list):
raise ValueError("key {}: {} must be an Array".format(key, array[key]))
for elem in array[key]:
if not isinstance(elem, str):
raise ValueError("array {}: {} must contains only string (not {})".format(key, array[key], elem))
def validate_metadata_token(self, claims, endpoint):
"""
If the token endpoint is used in the grant type, the value of this
parameter MUST be the same as the value of the "grant_type"
parameter passed to the token endpoint defined in the grant type
definition.
"""
self._grant_types.extend(endpoint._grant_types.keys())
claims.setdefault("token_endpoint_auth_methods_supported", ["client_secret_post", "client_secret_basic"])
self.validate_metadata(claims, "token_endpoint_auth_methods_supported", is_list=True)
self.validate_metadata(claims, "token_endpoint_auth_signing_alg_values_supported", is_list=True)
self.validate_metadata(claims, "token_endpoint", is_required=True, is_url=True)
def validate_metadata_authorization(self, claims, endpoint):
claims.setdefault("response_types_supported",
list(filter(lambda x: x != "none", endpoint._response_types.keys())))
claims.setdefault("response_modes_supported", ["query", "fragment"])
# The OAuth2.0 Implicit flow is defined as a "grant type" but it is not
# using the "token" endpoint, as such, we have to add it explicitly to
# the list of "grant_types_supported" when enabled.
if "token" in claims["response_types_supported"]:
self._grant_types.append("implicit")
self.validate_metadata(claims, "response_types_supported", is_required=True, is_list=True)
self.validate_metadata(claims, "response_modes_supported", is_list=True)
if "code" in claims["response_types_supported"]:
code_grant = endpoint._response_types["code"]
if not isinstance(code_grant, grant_types.AuthorizationCodeGrant) and hasattr(code_grant, "default_grant"):
code_grant = code_grant.default_grant
claims.setdefault("code_challenge_methods_supported",
list(code_grant._code_challenge_methods.keys()))
self.validate_metadata(claims, "code_challenge_methods_supported", is_list=True)
self.validate_metadata(claims, "authorization_endpoint", is_required=True, is_url=True)
def validate_metadata_revocation(self, claims, endpoint):
claims.setdefault("revocation_endpoint_auth_methods_supported",
["client_secret_post", "client_secret_basic"])
self.validate_metadata(claims, "revocation_endpoint_auth_methods_supported", is_list=True)
self.validate_metadata(claims, "revocation_endpoint_auth_signing_alg_values_supported", is_list=True)
self.validate_metadata(claims, "revocation_endpoint", is_required=True, is_url=True)
def validate_metadata_introspection(self, claims, endpoint):
claims.setdefault("introspection_endpoint_auth_methods_supported",
["client_secret_post", "client_secret_basic"])
self.validate_metadata(claims, "introspection_endpoint_auth_methods_supported", is_list=True)
self.validate_metadata(claims, "introspection_endpoint_auth_signing_alg_values_supported", is_list=True)
self.validate_metadata(claims, "introspection_endpoint", is_required=True, is_url=True)
def validate_metadata_server(self):
"""
Authorization servers can have metadata describing their
configuration. The following authorization server metadata values
are used by this specification. More details can be found in
`RFC8414 section 2`_ :
issuer
REQUIRED
authorization_endpoint
URL of the authorization server's authorization endpoint
[`RFC6749#Authorization`_]. This is REQUIRED unless no grant types are supported
that use the authorization endpoint.
token_endpoint
URL of the authorization server's token endpoint [`RFC6749#Token`_]. This
is REQUIRED unless only the implicit grant type is supported.
scopes_supported
RECOMMENDED.
response_types_supported
REQUIRED.
Other OPTIONAL fields:
jwks_uri,
registration_endpoint,
response_modes_supported
grant_types_supported
OPTIONAL. JSON array containing a list of the OAuth 2.0 grant
type values that this authorization server supports. The array
values used are the same as those used with the "grant_types"
parameter defined by "OAuth 2.0 Dynamic Client Registration
Protocol" [`RFC7591`_]. If omitted, the default value is
"["authorization_code", "implicit"]".
token_endpoint_auth_methods_supported
token_endpoint_auth_signing_alg_values_supported
service_documentation
ui_locales_supported
op_policy_uri
op_tos_uri
revocation_endpoint
revocation_endpoint_auth_methods_supported
revocation_endpoint_auth_signing_alg_values_supported
introspection_endpoint
introspection_endpoint_auth_methods_supported
introspection_endpoint_auth_signing_alg_values_supported
code_challenge_methods_supported
Additional authorization server metadata parameters MAY also be used.
Some are defined by other specifications, such as OpenID Connect
Discovery 1.0 [`OpenID.Discovery`_].
.. _`RFC8414 section 2`: https://tools.ietf.org/html/rfc8414#section-2
.. _`RFC6749#Authorization`: https://tools.ietf.org/html/rfc6749#section-3.1
.. _`RFC6749#Token`: https://tools.ietf.org/html/rfc6749#section-3.2
.. _`RFC7591`: https://tools.ietf.org/html/rfc7591
.. _`OpenID.Discovery`: https://openid.net/specs/openid-connect-discovery-1_0.html
"""
claims = copy.deepcopy(self.initial_claims)
self.validate_metadata(claims, "issuer", is_required=True, is_issuer=True)
self.validate_metadata(claims, "jwks_uri", is_url=True)
self.validate_metadata(claims, "scopes_supported", is_list=True)
self.validate_metadata(claims, "service_documentation", is_url=True)
self.validate_metadata(claims, "ui_locales_supported", is_list=True)
self.validate_metadata(claims, "op_policy_uri", is_url=True)
self.validate_metadata(claims, "op_tos_uri", is_url=True)
self._grant_types = []
for endpoint in self.endpoints:
if isinstance(endpoint, TokenEndpoint):
self.validate_metadata_token(claims, endpoint)
if isinstance(endpoint, AuthorizationEndpoint):
self.validate_metadata_authorization(claims, endpoint)
if isinstance(endpoint, RevocationEndpoint):
self.validate_metadata_revocation(claims, endpoint)
if isinstance(endpoint, IntrospectEndpoint):
self.validate_metadata_introspection(claims, endpoint)
# "grant_types_supported" is a combination of all OAuth2 grant types
# allowed in the current provider implementation.
claims.setdefault("grant_types_supported", self._grant_types)
self.validate_metadata(claims, "grant_types_supported", is_list=True)
return claims
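# ----------------------------------------------------------------------------
# Illustrative usage sketch (appended; not part of the original module). The
# claims dict supplies the deployment-specific values; the endpoint inspects
# the wrapped server to fill in defaults such as grant_types_supported. The
# URLs below are hypothetical.
if __name__ == '__main__':
    from oauthlib.oauth2 import BackendApplicationServer, RequestValidator

    server = BackendApplicationServer(RequestValidator())
    metadata = MetadataEndpoint([server], claims={
        'issuer': 'https://example.com',
        'token_endpoint': 'https://example.com/token',
        'revocation_endpoint': 'https://example.com/revoke',
        'introspection_endpoint': 'https://example.com/introspect',
    })
    headers, body, status = metadata.create_metadata_response(
        'https://example.com/.well-known/oauth-authorization-server')
    print(body)  # JSON document including grant_types_supported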
| 10,467
|
Python
|
.py
| 185
| 47.010811
| 119
| 0.676931
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,814
|
__init__.py
|
rembo10_headphones/lib/oauthlib/oauth2/rfc6749/endpoints/__init__.py
|
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from .authorization import AuthorizationEndpoint
from .introspect import IntrospectEndpoint
from .metadata import MetadataEndpoint
from .pre_configured import (
BackendApplicationServer, LegacyApplicationServer, MobileApplicationServer,
Server, WebApplicationServer,
)
from .resource import ResourceEndpoint
from .revocation import RevocationEndpoint
from .token import TokenEndpoint
| 553
|
Python
|
.py
| 16
| 33
| 79
| 0.824627
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,815
|
base.py
|
rembo10_headphones/lib/oauthlib/oauth2/rfc6749/endpoints/base.py
|
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
import functools
import logging
from ..errors import (
FatalClientError, InvalidClientError, InvalidRequestError, OAuth2Error,
ServerError, TemporarilyUnavailableError, UnsupportedTokenTypeError,
)
log = logging.getLogger(__name__)
class BaseEndpoint:
def __init__(self):
self._available = True
self._catch_errors = False
self._valid_request_methods = None
@property
def valid_request_methods(self):
return self._valid_request_methods
@valid_request_methods.setter
def valid_request_methods(self, valid_request_methods):
if valid_request_methods is not None:
valid_request_methods = [x.upper() for x in valid_request_methods]
self._valid_request_methods = valid_request_methods
@property
def available(self):
return self._available
@available.setter
def available(self, available):
self._available = available
@property
def catch_errors(self):
return self._catch_errors
@catch_errors.setter
def catch_errors(self, catch_errors):
self._catch_errors = catch_errors
def _raise_on_missing_token(self, request):
"""Raise error on missing token."""
if not request.token:
raise InvalidRequestError(request=request,
description='Missing token parameter.')
def _raise_on_invalid_client(self, request):
"""Raise on failed client authentication."""
if self.request_validator.client_authentication_required(request):
if not self.request_validator.authenticate_client(request):
log.debug('Client authentication failed, %r.', request)
raise InvalidClientError(request=request)
elif not self.request_validator.authenticate_client_id(request.client_id, request):
log.debug('Client authentication failed, %r.', request)
raise InvalidClientError(request=request)
def _raise_on_unsupported_token(self, request):
"""Raise on unsupported tokens."""
if (request.token_type_hint and
request.token_type_hint in self.valid_token_types and
request.token_type_hint not in self.supported_token_types):
raise UnsupportedTokenTypeError(request=request)
def _raise_on_bad_method(self, request):
if self.valid_request_methods is None:
raise ValueError('Configure "valid_request_methods" property first')
if request.http_method.upper() not in self.valid_request_methods:
raise InvalidRequestError(request=request,
description=('Unsupported request method %s' % request.http_method.upper()))
def _raise_on_bad_post_request(self, request):
"""Raise if invalid POST request received
"""
if request.http_method.upper() == 'POST':
query_params = request.uri_query or ""
if query_params:
raise InvalidRequestError(request=request,
description=('URL query parameters are not allowed'))
def catch_errors_and_unavailability(f):
@functools.wraps(f)
def wrapper(endpoint, uri, *args, **kwargs):
if not endpoint.available:
e = TemporarilyUnavailableError()
log.info('Endpoint unavailable, ignoring request %s.' % uri)
return {}, e.json, 503
if endpoint.catch_errors:
try:
return f(endpoint, uri, *args, **kwargs)
except OAuth2Error:
raise
except FatalClientError:
raise
except Exception as e:
error = ServerError()
log.warning(
'Exception caught while processing request, %s.' % e)
return {}, error.json, 500
else:
return f(endpoint, uri, *args, **kwargs)
return wrapper
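# ----------------------------------------------------------------------------
# Illustrative sketch (appended; not part of the original module). It shows
# the two behaviours of the decorator: a 503 short-circuit while the endpoint
# is flagged unavailable, and a generic server_error response when
# catch_errors is enabled and an unexpected exception escapes the handler.
# ``_EchoEndpoint`` is a hypothetical subclass.
if __name__ == '__main__':
    class _EchoEndpoint(BaseEndpoint):
        @catch_errors_and_unavailability
        def handle(self, uri):
            raise RuntimeError('boom')  # simulate an unexpected failure

    endpoint = _EchoEndpoint()
    endpoint.available = False
    print(endpoint.handle('https://example.com'))  # ({}, '...', 503)
    endpoint.available = True
    endpoint.catch_errors = True
    print(endpoint.handle('https://example.com'))  # ({}, '...', 500)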
| 4,130
|
Python
|
.py
| 94
| 33.957447
| 114
| 0.636681
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,816
|
resource.py
|
rembo10_headphones/lib/oauthlib/oauth2/rfc6749/endpoints/resource.py
|
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
import logging
from oauthlib.common import Request
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class ResourceEndpoint(BaseEndpoint):
"""Authorizes access to protected resources.
The client accesses protected resources by presenting the access
token to the resource server. The resource server MUST validate the
access token and ensure that it has not expired and that its scope
covers the requested resource. The methods used by the resource
server to validate the access token (as well as any error responses)
are beyond the scope of this specification but generally involve an
interaction or coordination between the resource server and the
authorization server::
# For most cases, returning a 403 should suffice.
The method in which the client utilizes the access token to
authenticate with the resource server depends on the type of access
token issued by the authorization server. Typically, it involves
using the HTTP "Authorization" request header field [RFC2617] with an
authentication scheme defined by the specification of the access
token type used, such as [RFC6750]::
# Access tokens may also be provided in query and body
https://example.com/protected?access_token=kjfch2345sdf # Query
access_token=sdf23409df # Body
"""
def __init__(self, default_token, token_types):
BaseEndpoint.__init__(self)
self._tokens = token_types
self._default_token = default_token
@property
def default_token(self):
return self._default_token
@property
def default_token_type_handler(self):
return self.tokens.get(self.default_token)
@property
def tokens(self):
return self._tokens
@catch_errors_and_unavailability
def verify_request(self, uri, http_method='GET', body=None, headers=None,
scopes=None):
"""Validate client, code etc, return body + headers"""
request = Request(uri, http_method, body, headers)
request.token_type = self.find_token_type(request)
request.scopes = scopes
token_type_handler = self.tokens.get(request.token_type,
self.default_token_type_handler)
log.debug('Dispatching token_type %s request to %r.',
request.token_type, token_type_handler)
return token_type_handler.validate_request(request), request
def find_token_type(self, request):
"""Token type identification.
RFC 6749 does not provide a method for easily differentiating between
different token types during protected resource access. We estimate
the most likely token type (if any) by asking each known token type
to give an estimation based on the request.
"""
estimates = sorted(((t.estimate_type(request), n)
for n, t in self.tokens.items()), reverse=True)
return estimates[0][1] if len(estimates) else None
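# ----------------------------------------------------------------------------
# Illustrative verification sketch (appended; not part of the original
# module). ``_DemoValidator`` is a hypothetical stub that accepts one
# hard-coded bearer token; a real validator would look the token up.
if __name__ == '__main__':
    from oauthlib.oauth2 import BearerToken, RequestValidator

    class _DemoValidator(RequestValidator):
        def validate_bearer_token(self, token, scopes, request,
                                  *args, **kwargs):
            return token == 'secret'

    endpoint = ResourceEndpoint(
        default_token='Bearer',
        token_types={'Bearer': BearerToken(_DemoValidator())})
    valid, request = endpoint.verify_request(
        'https://example.com/protected',
        headers={'Authorization': 'Bearer secret'},
        scopes=['profile'])
    print(valid)  # True; the Bearer handler was picked by find_token_type()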
| 3,248
|
Python
|
.py
| 66
| 41.606061
| 77
| 0.696271
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,817
|
introspect.py
|
rembo10_headphones/lib/oauthlib/oauth2/rfc6749/endpoints/introspect.py
|
"""
oauthlib.oauth2.rfc6749.endpoints.introspect
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the OAuth 2.0 `Token Introspection`_.
.. _`Token Introspection`: https://tools.ietf.org/html/rfc7662
"""
import json
import logging
from oauthlib.common import Request
from ..errors import OAuth2Error
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class IntrospectEndpoint(BaseEndpoint):
"""Introspect token endpoint.
This endpoint defines a method to query an OAuth 2.0 authorization
server to determine the active state of an OAuth 2.0 token and to
determine meta-information about this token. OAuth 2.0 deployments
can use this method to convey information about the authorization
context of the token from the authorization server to the protected
resource.
To prevent the values of access tokens from leaking into
server-side logs via query parameters, an authorization server
offering token introspection MAY disallow the use of HTTP GET on
the introspection endpoint and instead require the HTTP POST method
to be used at the introspection endpoint.
"""
valid_token_types = ('access_token', 'refresh_token')
valid_request_methods = ('POST',)
def __init__(self, request_validator, supported_token_types=None):
BaseEndpoint.__init__(self)
self.request_validator = request_validator
self.supported_token_types = (
supported_token_types or self.valid_token_types)
@catch_errors_and_unavailability
def create_introspect_response(self, uri, http_method='POST', body=None,
headers=None):
"""Create introspect valid or invalid response
If the authorization server is unable to determine the state
of the token without additional information, it SHOULD return
an introspection response indicating the token is not active
as described in Section 2.2.
"""
resp_headers = {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
}
request = Request(uri, http_method, body, headers)
try:
self.validate_introspect_request(request)
log.debug('Token introspect valid for %r.', request)
except OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
resp_headers.update(e.headers)
return resp_headers, e.json, e.status_code
claims = self.request_validator.introspect_token(
request.token,
request.token_type_hint,
request
)
if claims is None:
return resp_headers, json.dumps(dict(active=False)), 200
if "active" in claims:
claims.pop("active")
return resp_headers, json.dumps(dict(active=True, **claims)), 200
def validate_introspect_request(self, request):
"""Ensure the request is valid.
The protected resource calls the introspection endpoint using
an HTTP POST request with parameters sent as
"application/x-www-form-urlencoded".
token REQUIRED. The string value of the token.
token_type_hint OPTIONAL.
A hint about the type of the token submitted for
introspection. The protected resource MAY pass this parameter to
help the authorization server optimize the token lookup. If the
server is unable to locate the token using the given hint, it MUST
extend its search across all of its supported token types. An
authorization server MAY ignore this parameter, particularly if it
is able to detect the token type automatically.
* access_token: An Access Token as defined in [`RFC6749`_],
`section 1.4`_
* refresh_token: A Refresh Token as defined in [`RFC6749`_],
`section 1.5`_
The introspection endpoint MAY accept other OPTIONAL
parameters to provide further context to the query. For
instance, an authorization server may desire to know the IP
address of the client accessing the protected resource to
determine if the correct client is likely to be presenting the
token. The definition of this or any other parameters are
outside the scope of this specification, to be defined by
service documentation or extensions to this specification.
.. _`section 1.4`: http://tools.ietf.org/html/rfc6749#section-1.4
.. _`section 1.5`: http://tools.ietf.org/html/rfc6749#section-1.5
.. _`RFC6749`: http://tools.ietf.org/html/rfc6749
"""
self._raise_on_bad_method(request)
self._raise_on_bad_post_request(request)
self._raise_on_missing_token(request)
self._raise_on_invalid_client(request)
self._raise_on_unsupported_token(request)
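# ----------------------------------------------------------------------------
# Illustrative introspection sketch (appended; not part of the original
# module). ``_DemoValidator`` is a hypothetical stub: it knows one token and
# returns None for everything else, which yields {"active": false}.
if __name__ == '__main__':
    from oauthlib.oauth2 import RequestValidator

    class _DemoValidator(RequestValidator):
        def client_authentication_required(self, request, *args, **kwargs):
            return False

        def authenticate_client_id(self, client_id, request, *args, **kwargs):
            return True

        def introspect_token(self, token, token_type_hint, request,
                             *args, **kwargs):
            if token == 'abc123':
                return {'scope': 'profile', 'client_id': 'demo'}
            return None

    endpoint = IntrospectEndpoint(_DemoValidator())
    headers, body, status = endpoint.create_introspect_response(
        'https://example.com/introspect', body='token=abc123')
    print(body)  # {"active": true, "scope": "profile", "client_id": "demo"}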
| 4,983
|
Python
|
.py
| 100
| 41.46
| 78
| 0.678667
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,818
|
implicit.py
|
rembo10_headphones/lib/oauthlib/oauth2/rfc6749/grant_types/implicit.py
|
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import logging
from oauthlib import common
from .. import errors
from .base import GrantTypeBase
log = logging.getLogger(__name__)
class ImplicitGrant(GrantTypeBase):
"""`Implicit Grant`_
The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
Unlike the authorization code grant type, in which the client makes
separate requests for authorization and for an access token, the
client receives the access token as the result of the authorization
request.
The implicit grant type does not include client authentication, and
relies on the presence of the resource owner and the registration of
the redirection URI. Because the access token is encoded into the
redirection URI, it may be exposed to the resource owner and other
applications residing on the same device::
+----------+
| Resource |
|  Owner   |
|          |
+----------+
     ^
     |
    (B)
+----|-----+          Client Identifier     +---------------+
|         -+----(A)-- & Redirection URI --->|               |
|  User-   |                                | Authorization |
|  Agent  -|----(B)-- User authenticates -->|     Server    |
|          |                                |               |
|          |<---(C)--- Redirection URI ----<|               |
|          |          with Access Token     +---------------+
|          |            in Fragment
|          |                                +---------------+
|          |----(D)--- Redirection URI ---->|   Web-Hosted  |
|          |          without Fragment      |     Client    |
|          |                                |    Resource   |
|     (F)  |<---(E)------- Script ---------<|               |
|          |                                +---------------+
+-|--------+
  |    |
 (A)  (G) Access Token
  |    |
  ^    v
+---------+
|         |
|  Client |
|         |
+---------+
Note: The lines illustrating steps (A) and (B) are broken into two
parts as they pass through the user-agent.
Figure 4: Implicit Grant Flow
The flow illustrated in Figure 4 includes the following steps:
(A) The client initiates the flow by directing the resource owner's
user-agent to the authorization endpoint. The client includes
its client identifier, requested scope, local state, and a
redirection URI to which the authorization server will send the
user-agent back once access is granted (or denied).
(B) The authorization server authenticates the resource owner (via
the user-agent) and establishes whether the resource owner
grants or denies the client's access request.
(C) Assuming the resource owner grants access, the authorization
server redirects the user-agent back to the client using the
redirection URI provided earlier. The redirection URI includes
the access token in the URI fragment.
(D) The user-agent follows the redirection instructions by making a
request to the web-hosted client resource (which does not
include the fragment per [RFC2616]). The user-agent retains the
fragment information locally.
(E) The web-hosted client resource returns a web page (typically an
HTML document with an embedded script) capable of accessing the
full redirection URI including the fragment retained by the
user-agent, and extracting the access token (and other
parameters) contained in the fragment.
(F) The user-agent executes the script provided by the web-hosted
client resource locally, which extracts the access token.
(G) The user-agent passes the access token to the client.
See `Section 10.3`_ and `Section 10.16`_ for important security considerations
when using the implicit grant.
.. _`Implicit Grant`: https://tools.ietf.org/html/rfc6749#section-4.2
.. _`Section 10.3`: https://tools.ietf.org/html/rfc6749#section-10.3
.. _`Section 10.16`: https://tools.ietf.org/html/rfc6749#section-10.16
"""
response_types = ['token']
grant_allows_refresh_token = False
def create_authorization_response(self, request, token_handler):
"""Create an authorization response.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
response_type
REQUIRED. Value MUST be set to "token" for standard OAuth2 implicit flow
or "id_token token" or just "id_token" for OIDC implicit flow
client_id
REQUIRED. The client identifier as described in `Section 2.2`_.
redirect_uri
OPTIONAL. As described in `Section 3.1.2`_.
scope
OPTIONAL. The scope of the access request as described by
`Section 3.3`_.
state
RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
The authorization server validates the request to ensure that all
required parameters are present and valid. The authorization server
MUST verify that the redirection URI to which it will redirect the
access token matches a redirection URI registered by the client as
described in `Section 3.1.2`_.
.. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
"""
return self.create_token_response(request, token_handler)
def create_token_response(self, request, token_handler):
"""Return token or error embedded in the URI fragment.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format, per
`Appendix B`_:
access_token
REQUIRED. The access token issued by the authorization server.
token_type
REQUIRED. The type of the token issued as described in
`Section 7.1`_. Value is case insensitive.
expires_in
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
scope
OPTIONAL, if identical to the scope requested by the client;
otherwise, REQUIRED. The scope of the access token as
described by `Section 3.3`_.
state
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
The authorization server MUST NOT issue a refresh token.
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
"""
try:
self.validate_token_request(request)
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
except errors.FatalClientError as e:
log.debug('Fatal client error during validation of %r. %r.',
request, e)
raise
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the fragment component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B:
# https://tools.ietf.org/html/rfc6749#appendix-B
except errors.OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
return {'Location': common.add_params_to_uri(request.redirect_uri, e.twotuples,
fragment=True)}, None, 302
# In OIDC implicit flow it is possible to have a response_type that does not include the access_token!
# "id_token token" - return the access token and the id token
# "id_token" - don't return the access token
if "token" in request.response_type.split():
token = token_handler.create_token(request, refresh_token=False)
else:
token = {}
if request.state is not None:
token['state'] = request.state
for modifier in self._token_modifiers:
token = modifier(token, token_handler, request)
# In OIDC implicit flow it is possible to have a response_type that does
# not include the access_token! In this case there is no need to save a token.
if "token" in request.response_type.split():
self.request_validator.save_token(token, request)
return self.prepare_authorization_response(
request, token, {}, None, 302)
def validate_authorization_request(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
return self.validate_token_request(request)
def validate_token_request(self, request):
"""Check the token request for normal and fatal errors.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
This method is very similar to validate_authorization_request in
the AuthorizationCodeGrant but differs in a few subtle areas.
A normal error could be a missing response_type parameter or the client
attempting to access scope it is not allowed to ask authorization for.
Normal errors can safely be included in the redirection URI and
sent back to the client.
Fatal errors occur when the client_id or redirect_uri is invalid or
missing. These must be caught by the provider and handled, how this
is done is outside of the scope of OAuthLib but showing an error
page describing the issue is a good idea.
"""
# First check for fatal errors
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
# First check duplicate parameters
for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
try:
duplicate_params = request.duplicate_params
except ValueError:
raise errors.InvalidRequestFatalError(description='Unable to parse query string', request=request)
if param in duplicate_params:
raise errors.InvalidRequestFatalError(description='Duplicate %s parameter.' % param, request=request)
# REQUIRED. The client identifier as described in Section 2.2.
# https://tools.ietf.org/html/rfc6749#section-2.2
if not request.client_id:
raise errors.MissingClientIdError(request=request)
if not self.request_validator.validate_client_id(request.client_id, request):
raise errors.InvalidClientIdError(request=request)
# OPTIONAL. As described in Section 3.1.2.
# https://tools.ietf.org/html/rfc6749#section-3.1.2
self._handle_redirects(request)
# Then check for normal errors.
request_info = self._run_custom_validators(request,
self.custom_validators.all_pre)
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the fragment component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B.
# https://tools.ietf.org/html/rfc6749#appendix-B
# Note that the correct parameters to be added are automatically
# populated through the use of specific exceptions
# REQUIRED.
if request.response_type is None:
raise errors.MissingResponseTypeError(request=request)
# Value MUST be one of our registered types: "token" by default or if using OIDC "id_token" or "id_token token"
elif not set(request.response_type.split()).issubset(self.response_types):
raise errors.UnsupportedResponseTypeError(request=request)
log.debug('Validating use of response_type token for client %r (%r).',
request.client_id, request.client)
if not self.request_validator.validate_response_type(request.client_id,
request.response_type,
request.client, request):
log.debug('Client %s is not authorized to use response_type %s.',
request.client_id, request.response_type)
raise errors.UnauthorizedClientError(request=request)
# OPTIONAL. The scope of the access request as described by Section 3.3
# https://tools.ietf.org/html/rfc6749#section-3.3
self.validate_scopes(request)
request_info.update({
'client_id': request.client_id,
'redirect_uri': request.redirect_uri,
'response_type': request.response_type,
'state': request.state,
'request': request,
})
request_info = self._run_custom_validators(
request,
self.custom_validators.all_post,
request_info
)
return request.scopes, request_info
def _run_custom_validators(self,
request,
validations,
request_info=None):
# Make a copy so we don't modify the existing request_info dict
request_info = {} if request_info is None else request_info.copy()
# For implicit grant, auth_validators and token_validators are
# basically equivalent since the token is returned from the
# authorization endpoint.
for validator in validations:
result = validator(request)
if result is not None:
request_info.update(result)
return request_info
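# ----------------------------------------------------------------------------
# Illustrative fragment-response sketch (appended; not part of the original
# module). ``_DemoValidator`` is a hypothetical stub covering only the hooks
# used above; note the token lands in the fragment of the Location header.
if __name__ == '__main__':
    from oauthlib.oauth2 import BearerToken, RequestValidator

    class _DemoValidator(RequestValidator):
        def validate_client_id(self, client_id, request, *args, **kwargs):
            return True

        def validate_redirect_uri(self, client_id, redirect_uri,
                                  request, *args, **kwargs):
            return True

        def validate_response_type(self, client_id, response_type, client,
                                   request, *args, **kwargs):
            return True

        def validate_scopes(self, client_id, scopes, client,
                            request, *args, **kwargs):
            return True

        def save_bearer_token(self, token, request, *args, **kwargs):
            pass

    validator = _DemoValidator()
    request = common.Request(
        'https://example.com/authorize?client_id=demo&response_type=token'
        '&redirect_uri=https%3A%2F%2Fclient.example%2Fcb&state=xyz')
    request.scopes = ['profile']
    headers, _, status = ImplicitGrant(validator).create_authorization_response(
        request, BearerToken(validator))
    print(headers['Location'])  # https://client.example/cb#access_token=...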
| 16,852
|
Python
|
.py
| 302
| 44.94702
| 119
| 0.624909
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,819
|
client_credentials.py
|
rembo10_headphones/lib/oauthlib/oauth2/rfc6749/grant_types/client_credentials.py
|
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import json
import logging
from .. import errors
from .base import GrantTypeBase
log = logging.getLogger(__name__)
class ClientCredentialsGrant(GrantTypeBase):
"""`Client Credentials Grant`_
The client can request an access token using only its client
credentials (or other supported means of authentication) when the
client is requesting access to the protected resources under its
control, or those of another resource owner that have been previously
arranged with the authorization server (the method of which is beyond
the scope of this specification).
The client credentials grant type MUST only be used by confidential
clients::
+---------+                                  +---------------+
|         |                                  |               |
|         |>--(A)- Client Authentication --->| Authorization |
| Client  |                                  |     Server    |
|         |<--(B)---- Access Token ---------<|               |
|         |                                  |               |
+---------+                                  +---------------+
Figure 6: Client Credentials Flow
The flow illustrated in Figure 6 includes the following steps:
(A) The client authenticates with the authorization server and
requests an access token from the token endpoint.
(B) The authorization server authenticates the client, and if valid,
issues an access token.
.. _`Client Credentials Grant`: https://tools.ietf.org/html/rfc6749#section-4.4
"""
def create_token_response(self, request, token_handler):
"""Return token or error in JSON format.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
"""
headers = self._get_default_headers()
try:
log.debug('Validating access token request, %r.', request)
self.validate_token_request(request)
except errors.OAuth2Error as e:
log.debug('Client error in token request. %s.', e)
headers.update(e.headers)
return headers, e.json, e.status_code
token = token_handler.create_token(request, refresh_token=False)
for modifier in self._token_modifiers:
token = modifier(token)
self.request_validator.save_token(token, request)
log.debug('Issuing token to client id %r (%r), %r.',
request.client_id, request.client, token)
return headers, json.dumps(token), 200
def validate_token_request(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
for validator in self.custom_validators.pre_token:
validator(request)
if not getattr(request, 'grant_type', None):
raise errors.InvalidRequestError('Request is missing grant type.',
request=request)
if request.grant_type != 'client_credentials':
raise errors.UnsupportedGrantTypeError(request=request)
for param in ('grant_type', 'scope'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param,
request=request)
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
else:
if not hasattr(request.client, 'client_id'):
raise NotImplementedError('Authenticate client must set the '
'request.client.client_id attribute '
'in authenticate_client.')
# Ensure the client is authorized to use this grant type
self.validate_grant_type(request)
request.client_id = request.client_id or request.client.client_id
log.debug('Authorizing access to client %r.', request.client_id)
self.validate_scopes(request)
for validator in self.custom_validators.post_token:
validator(request)
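# ----------------------------------------------------------------------------
# Illustrative grant sketch (appended; not part of the original module),
# driving the grant directly rather than through a TokenEndpoint.
# ``_DemoValidator`` is a hypothetical stub for the hooks used above.
if __name__ == '__main__':
    from oauthlib.common import Request
    from oauthlib.oauth2 import BearerToken, RequestValidator

    class _DemoValidator(RequestValidator):
        def authenticate_client(self, request, *args, **kwargs):
            request.client = type('Client', (), {'client_id': 'demo'})()
            return True

        def validate_grant_type(self, client_id, grant_type, client,
                                request, *args, **kwargs):
            return grant_type == 'client_credentials'

        def validate_scopes(self, client_id, scopes, client,
                            request, *args, **kwargs):
            return True

        def save_bearer_token(self, token, request, *args, **kwargs):
            pass

    validator = _DemoValidator()
    request = Request('https://example.com/token', http_method='POST',
                      body='grant_type=client_credentials&scope=profile')
    _, body, status = ClientCredentialsGrant(validator).create_token_response(
        request, BearerToken(validator))
    print(status, body)  # 200; note there is no refresh_token in the body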
| 5,079
|
Python
|
.py
| 95
| 43.010526
| 95
| 0.598467
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,820
|
refresh_token.py
|
rembo10_headphones/lib/oauthlib/oauth2/rfc6749/grant_types/refresh_token.py
|
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import json
import logging
from .. import errors, utils
from .base import GrantTypeBase
log = logging.getLogger(__name__)
class RefreshTokenGrant(GrantTypeBase):
"""`Refresh token grant`_
.. _`Refresh token grant`: https://tools.ietf.org/html/rfc6749#section-6
"""
def __init__(self, request_validator=None,
issue_new_refresh_tokens=True,
**kwargs):
super().__init__(
request_validator,
issue_new_refresh_tokens=issue_new_refresh_tokens,
**kwargs)
def create_token_response(self, request, token_handler):
"""Create a new access token from a refresh_token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
If valid and authorized, the authorization server issues an access
token as described in `Section 5.1`_. If the request failed
verification or is invalid, the authorization server returns an error
response as described in `Section 5.2`_.
The authorization server MAY issue a new refresh token, in which case
the client MUST discard the old refresh token and replace it with the
new refresh token. The authorization server MAY revoke the old
refresh token after issuing a new refresh token to the client. If a
new refresh token is issued, the refresh token scope MUST be
identical to that of the refresh token included by the client in the
request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
"""
headers = self._get_default_headers()
try:
log.debug('Validating refresh token request, %r.', request)
self.validate_token_request(request)
except errors.OAuth2Error as e:
log.debug('Client error in token request, %s.', e)
headers.update(e.headers)
return headers, e.json, e.status_code
token = token_handler.create_token(request,
refresh_token=self.issue_new_refresh_tokens)
for modifier in self._token_modifiers:
token = modifier(token)
self.request_validator.save_token(token, request)
log.debug('Issuing new token to client id %r (%r), %r.',
request.client_id, request.client, token)
return headers, json.dumps(token), 200
def validate_token_request(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
# REQUIRED. Value MUST be set to "refresh_token".
if request.grant_type != 'refresh_token':
raise errors.UnsupportedGrantTypeError(request=request)
for validator in self.custom_validators.pre_token:
validator(request)
if request.refresh_token is None:
raise errors.InvalidRequestError(
description='Missing refresh token parameter.',
request=request)
# Because refresh tokens are typically long-lasting credentials used to
# request additional access tokens, the refresh token is bound to the
# client to which it was issued. If the client type is confidential or
# the client was issued client credentials (or assigned other
# authentication requirements), the client MUST authenticate with the
# authorization server as described in Section 3.2.1.
# https://tools.ietf.org/html/rfc6749#section-3.2.1
if self.request_validator.client_authentication_required(request):
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Invalid client (%r), denying access.', request)
raise errors.InvalidClientError(request=request)
elif not self.request_validator.authenticate_client_id(request.client_id, request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
        # Ensure the client is authorized to use this grant type
self.validate_grant_type(request)
# REQUIRED. The refresh token issued to the client.
log.debug('Validating refresh token %s for client %r.',
request.refresh_token, request.client)
if not self.request_validator.validate_refresh_token(
request.refresh_token, request.client, request):
log.debug('Invalid refresh token, %s, for client %r.',
request.refresh_token, request.client)
raise errors.InvalidGrantError(request=request)
original_scopes = utils.scope_to_list(
self.request_validator.get_original_scopes(
request.refresh_token, request))
if request.scope:
request.scopes = utils.scope_to_list(request.scope)
if (not all(s in original_scopes for s in request.scopes)
and not self.request_validator.is_within_original_scope(
request.scopes, request.refresh_token, request)):
                log.debug('Refresh token %s lacks requested scopes, %r.',
                          request.refresh_token, request.scopes)
raise errors.InvalidScopeError(request=request)
else:
request.scopes = original_scopes
for validator in self.custom_validators.post_token:
validator(request)
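
# --- Hedged usage sketch (added for illustration, not part of oauthlib) ---
# Shape of a refresh_token exchange; `validator` is an assumed
# RequestValidator implementation and OLD_TOKEN is a placeholder.
def _demo_refresh_token_grant(validator):
    from oauthlib.common import Request
    from oauthlib.oauth2 import BearerToken

    grant = RefreshTokenGrant(validator, issue_new_refresh_tokens=True)
    token_handler = BearerToken(validator)
    request = Request(
        'https://provider.example/token',
        http_method='POST',
        body='grant_type=refresh_token&refresh_token=OLD_TOKEN',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
    )
    # On success the body carries a fresh access token and, because
    # issue_new_refresh_tokens=True, a replacement refresh token.
    return grant.create_token_response(request, token_handler)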
| 5,808 | Python | .py | 110 | 41.763636 | 91 | 0.644985 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,821 | resource_owner_password_credentials.py | rembo10_headphones/lib/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py |
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import json
import logging
from .. import errors
from .base import GrantTypeBase
log = logging.getLogger(__name__)
class ResourceOwnerPasswordCredentialsGrant(GrantTypeBase):
"""`Resource Owner Password Credentials Grant`_
The resource owner password credentials grant type is suitable in
cases where the resource owner has a trust relationship with the
client, such as the device operating system or a highly privileged
application. The authorization server should take special care when
enabling this grant type and only allow it when other flows are not
viable.
This grant type is suitable for clients capable of obtaining the
resource owner's credentials (username and password, typically using
an interactive form). It is also used to migrate existing clients
using direct authentication schemes such as HTTP Basic or Digest
authentication to OAuth by converting the stored credentials to an
access token::
+----------+
| Resource |
| Owner |
| |
+----------+
v
| Resource Owner
(A) Password Credentials
|
v
+---------+ +---------------+
| |>--(B)---- Resource Owner ------->| |
| | Password Credentials | Authorization |
| Client | | Server |
| |<--(C)---- Access Token ---------<| |
| | (w/ Optional Refresh Token) | |
+---------+ +---------------+
Figure 5: Resource Owner Password Credentials Flow
The flow illustrated in Figure 5 includes the following steps:
(A) The resource owner provides the client with its username and
password.
(B) The client requests an access token from the authorization
server's token endpoint by including the credentials received
from the resource owner. When making the request, the client
authenticates with the authorization server.
(C) The authorization server authenticates the client and validates
the resource owner credentials, and if valid, issues an access
token.
.. _`Resource Owner Password Credentials Grant`: https://tools.ietf.org/html/rfc6749#section-4.3
"""
def create_token_response(self, request, token_handler):
"""Return token or error in json format.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
If the access token request is valid and authorized, the
authorization server issues an access token and optional refresh
token as described in `Section 5.1`_. If the request failed client
authentication or is invalid, the authorization server returns an
error response as described in `Section 5.2`_.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
"""
headers = self._get_default_headers()
try:
if self.request_validator.client_authentication_required(request):
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
elif not self.request_validator.authenticate_client_id(request.client_id, request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
log.debug('Validating access token request, %r.', request)
self.validate_token_request(request)
except errors.OAuth2Error as e:
log.debug('Client error in token request, %s.', e)
headers.update(e.headers)
return headers, e.json, e.status_code
token = token_handler.create_token(request, self.refresh_token)
for modifier in self._token_modifiers:
token = modifier(token)
self.request_validator.save_token(token, request)
log.debug('Issuing token %r to client id %r (%r) and username %s.',
token, request.client_id, request.client, request.username)
return headers, json.dumps(token), 200
def validate_token_request(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
The client makes a request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format per Appendix B with a character encoding of UTF-8 in the HTTP
request entity-body:
grant_type
REQUIRED. Value MUST be set to "password".
username
REQUIRED. The resource owner username.
password
REQUIRED. The resource owner password.
scope
OPTIONAL. The scope of the access request as described by
`Section 3.3`_.
If the client type is confidential or the client was issued client
credentials (or assigned other authentication requirements), the
client MUST authenticate with the authorization server as described
in `Section 3.2.1`_.
The authorization server MUST:
o require client authentication for confidential clients or for any
client that was issued client credentials (or with other
authentication requirements),
o authenticate the client if client authentication is included, and
o validate the resource owner password credentials using its
existing password validation algorithm.
Since this access token request utilizes the resource owner's
password, the authorization server MUST protect the endpoint against
brute force attacks (e.g., using rate-limitation or generating
alerts).
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
"""
for validator in self.custom_validators.pre_token:
validator(request)
for param in ('grant_type', 'username', 'password'):
if not getattr(request, param, None):
raise errors.InvalidRequestError(
'Request is missing %s parameter.' % param, request=request)
for param in ('grant_type', 'username', 'password', 'scope'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param, request=request)
# This error should rarely (if ever) occur if requests are routed to
# grant type handlers based on the grant_type parameter.
        if request.grant_type != 'password':
raise errors.UnsupportedGrantTypeError(request=request)
log.debug('Validating username %s.', request.username)
if not self.request_validator.validate_user(request.username,
request.password, request.client, request):
raise errors.InvalidGrantError(
'Invalid credentials given.', request=request)
else:
if not hasattr(request.client, 'client_id'):
raise NotImplementedError(
'Validate user must set the '
'request.client.client_id attribute '
'in authenticate_client.')
log.debug('Authorizing access to user %r.', request.user)
        # Ensure the client is authorized to use this grant type
self.validate_grant_type(request)
if request.client:
request.client_id = request.client_id or request.client.client_id
self.validate_scopes(request)
for validator in self.custom_validators.post_token:
validator(request)
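
# --- Hedged usage sketch (added for illustration, not part of oauthlib) ---
# Exchange of resource owner credentials for a token; `validator` is an
# assumed RequestValidator implementation and the credentials are
# placeholders. A real deployment must also rate-limit this endpoint, as
# the docstring above requires.
def _demo_password_grant(validator):
    from oauthlib.common import Request
    from oauthlib.oauth2 import BearerToken

    grant = ResourceOwnerPasswordCredentialsGrant(validator)
    token_handler = BearerToken(validator)
    request = Request(
        'https://provider.example/token',
        http_method='POST',
        body='grant_type=password&username=alice&password=wonderland',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
    )
    return grant.create_token_response(request, token_handler)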
| 8,516 | Python | .py | 157 | 43.216561 | 112 | 0.622941 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,822 | __init__.py | rembo10_headphones/lib/oauthlib/oauth2/rfc6749/grant_types/__init__.py |
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from .authorization_code import AuthorizationCodeGrant
from .client_credentials import ClientCredentialsGrant
from .implicit import ImplicitGrant
from .refresh_token import RefreshTokenGrant
from .resource_owner_password_credentials import (
ResourceOwnerPasswordCredentialsGrant,
)
| 368 | Python | .py | 11 | 32.090909 | 54 | 0.778711 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,823 | authorization_code.py | rembo10_headphones/lib/oauthlib/oauth2/rfc6749/grant_types/authorization_code.py |
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import base64
import hashlib
import json
import logging
from oauthlib import common
from .. import errors
from .base import GrantTypeBase
log = logging.getLogger(__name__)
def code_challenge_method_s256(verifier, challenge):
"""
If the "code_challenge_method" from `Section 4.3`_ was "S256", the
received "code_verifier" is hashed by SHA-256, base64url-encoded, and
then compared to the "code_challenge", i.e.:
BASE64URL-ENCODE(SHA256(ASCII(code_verifier))) == code_challenge
    Below is one way to implement a base64url-encoding
function without padding, based upon the standard base64-encoding
function that uses padding.
To be concrete, example C# code implementing these functions is shown
below. Similar code could be used in other languages.
static string base64urlencode(byte [] arg)
{
string s = Convert.ToBase64String(arg); // Regular base64 encoder
s = s.Split('=')[0]; // Remove any trailing '='s
s = s.Replace('+', '-'); // 62nd char of encoding
s = s.Replace('/', '_'); // 63rd char of encoding
return s;
}
    In Python, urlsafe_b64encode already replaces '+' and '/', but preserves
    the trailing '='. So we have to remove it.
.. _`Section 4.3`: https://tools.ietf.org/html/rfc7636#section-4.3
"""
return base64.urlsafe_b64encode(
hashlib.sha256(verifier.encode()).digest()
).decode().rstrip('=') == challenge
def code_challenge_method_plain(verifier, challenge):
"""
If the "code_challenge_method" from `Section 4.3`_ was "plain", they are
compared directly, i.e.:
code_verifier == code_challenge.
.. _`Section 4.3`: https://tools.ietf.org/html/rfc7636#section-4.3
"""
return verifier == challenge
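
# --- Hedged usage sketch (added for illustration, not part of oauthlib) ---
# Demonstrates the verifier/challenge round trip checked by the two
# functions above. generate_token() is oauthlib's random-string helper; the
# length of 48 is an arbitrary choice within the 43-128 character range
# that RFC 7636 permits for code verifiers.
def _demo_pkce_roundtrip():
    verifier = common.generate_token(length=48)
    # The client sends BASE64URL-ENCODE(SHA256(ASCII(verifier))) as the
    # code_challenge together with code_challenge_method=S256 ...
    challenge = base64.urlsafe_b64encode(
        hashlib.sha256(verifier.encode()).digest()
    ).decode().rstrip('=')
    # ... and the server later verifies the stored challenge against the
    # verifier presented at the token endpoint.
    assert code_challenge_method_s256(verifier, challenge)
    assert code_challenge_method_plain(verifier, verifier)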
class AuthorizationCodeGrant(GrantTypeBase):
"""`Authorization Code Grant`_
The authorization code grant type is used to obtain both access
tokens and refresh tokens and is optimized for confidential clients.
Since this is a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server::
+----------+
| Resource |
| Owner |
| |
+----------+
^
|
(B)
+----|-----+ Client Identifier +---------------+
| -+----(A)-- & Redirection URI ---->| |
| User- | | Authorization |
| Agent -+----(B)-- User authenticates --->| Server |
| | | |
| -+----(C)-- Authorization Code ---<| |
+-|----|---+ +---------------+
| | ^ v
(A) (C) | |
| | | |
^ v | |
+---------+ | |
| |>---(D)-- Authorization Code ---------' |
| Client | & Redirection URI |
| | |
| |<---(E)----- Access Token -------------------'
+---------+ (w/ Optional Refresh Token)
Note: The lines illustrating steps (A), (B), and (C) are broken into
two parts as they pass through the user-agent.
Figure 3: Authorization Code Flow
The flow illustrated in Figure 3 includes the following steps:
(A) The client initiates the flow by directing the resource owner's
user-agent to the authorization endpoint. The client includes
its client identifier, requested scope, local state, and a
redirection URI to which the authorization server will send the
user-agent back once access is granted (or denied).
(B) The authorization server authenticates the resource owner (via
the user-agent) and establishes whether the resource owner
grants or denies the client's access request.
(C) Assuming the resource owner grants access, the authorization
server redirects the user-agent back to the client using the
redirection URI provided earlier (in the request or during
client registration). The redirection URI includes an
authorization code and any local state provided by the client
earlier.
(D) The client requests an access token from the authorization
server's token endpoint by including the authorization code
received in the previous step. When making the request, the
client authenticates with the authorization server. The client
includes the redirection URI used to obtain the authorization
code for verification.
(E) The authorization server authenticates the client, validates the
authorization code, and ensures that the redirection URI
received matches the URI used to redirect the client in
step (C). If valid, the authorization server responds back with
an access token and, optionally, a refresh token.
OAuth 2.0 public clients utilizing the Authorization Code Grant are
susceptible to the authorization code interception attack.
    A technique to mitigate this threat, Proof Key for Code Exchange
    (`PKCE`_, pronounced "pixy"), is implemented in oauthlib.
.. _`Authorization Code Grant`: https://tools.ietf.org/html/rfc6749#section-4.1
.. _`PKCE`: https://tools.ietf.org/html/rfc7636
"""
default_response_mode = 'query'
response_types = ['code']
    # This dict below is private because, as the RFC mentions:
# "S256" is Mandatory To Implement (MTI) on the server.
#
_code_challenge_methods = {
'plain': code_challenge_method_plain,
'S256': code_challenge_method_s256
}
def create_authorization_code(self, request):
"""
Generates an authorization grant represented as a dictionary.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
grant = {'code': common.generate_token()}
if hasattr(request, 'state') and request.state:
grant['state'] = request.state
log.debug('Created authorization code grant %r for request %r.',
grant, request)
return grant
def create_authorization_response(self, request, token_handler):
"""
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format, per `Appendix B`_:
response_type
REQUIRED. Value MUST be set to "code" for standard OAuth2
authorization flow. For OpenID Connect it must be one of
"code token", "code id_token", or "code token id_token" - we
essentially test that "code" appears in the response_type.
client_id
REQUIRED. The client identifier as described in `Section 2.2`_.
redirect_uri
OPTIONAL. As described in `Section 3.1.2`_.
scope
OPTIONAL. The scope of the access request as described by
`Section 3.3`_.
state
RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in `Section 10.12`_.
The client directs the resource owner to the constructed URI using an
HTTP redirection response, or by other means available to it via the
user-agent.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
:returns: headers, body, status
:raises: FatalClientError on invalid redirect URI or client id.
A few examples::
>>> from your_validator import your_validator
        >>> from oauthlib.common import Request
        >>> from oauthlib.oauth2 import AuthorizationCodeGrant, BearerToken
        >>> request = Request('https://example.com/authorize?client_id=valid'
        ...                   '&redirect_uri=http%3A%2F%2Fclient.com%2F')
>>> token = BearerToken(your_validator)
>>> grant = AuthorizationCodeGrant(your_validator)
>>> request.scopes = ['authorized', 'in', 'some', 'form']
>>> grant.create_authorization_response(request, token)
(u'http://client.com/?error=invalid_request&error_description=Missing+response_type+parameter.', None, None, 400)
>>> request = Request('https://example.com/authorize?client_id=valid'
... '&redirect_uri=http%3A%2F%2Fclient.com%2F'
... '&response_type=code')
>>> request.scopes = ['authorized', 'in', 'some', 'form']
>>> grant.create_authorization_response(request, token)
(u'http://client.com/?code=u3F05aEObJuP2k7DordviIgW5wl52N', None, None, 200)
>>> # If the client id or redirect uri fails validation
>>> grant.create_authorization_response(request, token)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "oauthlib/oauth2/rfc6749/grant_types.py", line 515, in create_authorization_response
File "oauthlib/oauth2/rfc6749/grant_types.py", line 591, in validate_authorization_request
oauthlib.oauth2.rfc6749.errors.InvalidClientIdError
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
.. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
"""
try:
self.validate_authorization_request(request)
log.debug('Pre resource owner authorization validation ok for %r.',
request)
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
except errors.FatalClientError as e:
log.debug('Fatal client error during validation of %r. %r.',
request, e)
raise
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the query component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B:
# https://tools.ietf.org/html/rfc6749#appendix-B
except errors.OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
request.redirect_uri = request.redirect_uri or self.error_uri
redirect_uri = common.add_params_to_uri(
request.redirect_uri, e.twotuples,
fragment=request.response_mode == "fragment")
return {'Location': redirect_uri}, None, 302
grant = self.create_authorization_code(request)
for modifier in self._code_modifiers:
grant = modifier(grant, token_handler, request)
log.debug('Saving grant %r for %r.', grant, request)
self.request_validator.save_authorization_code(
request.client_id, grant, request)
return self.prepare_authorization_response(
request, grant, {}, None, 302)
def create_token_response(self, request, token_handler):
"""Validate the authorization code.
The client MUST NOT use the authorization code more than once. If an
authorization code is used more than once, the authorization server
MUST deny the request and SHOULD revoke (when possible) all tokens
previously issued based on that authorization code. The authorization
code is bound to the client identifier and redirection URI.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
"""
headers = self._get_default_headers()
try:
self.validate_token_request(request)
log.debug('Token request validation ok for %r.', request)
except errors.OAuth2Error as e:
log.debug('Client error during validation of %r. %r.', request, e)
headers.update(e.headers)
return headers, e.json, e.status_code
token = token_handler.create_token(request, refresh_token=self.refresh_token)
for modifier in self._token_modifiers:
token = modifier(token, token_handler, request)
self.request_validator.save_token(token, request)
self.request_validator.invalidate_authorization_code(
request.client_id, request.code, request)
return headers, json.dumps(token), 200
def validate_authorization_request(self, request):
"""Check the authorization request for normal and fatal errors.
A normal error could be a missing response_type parameter or the client
        requesting a scope it is not allowed to ask authorization for.
Normal errors can safely be included in the redirection URI and
sent back to the client.
Fatal errors occur when the client_id or redirect_uri is invalid or
        missing. These must be caught by the provider and handled; how this
        is done is outside the scope of OAuthLib, but showing an error
        page describing the issue is a good idea.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
# First check for fatal errors
# If the request fails due to a missing, invalid, or mismatching
# redirection URI, or if the client identifier is missing or invalid,
# the authorization server SHOULD inform the resource owner of the
# error and MUST NOT automatically redirect the user-agent to the
# invalid redirection URI.
# First check duplicate parameters
for param in ('client_id', 'response_type', 'redirect_uri', 'scope', 'state'):
try:
duplicate_params = request.duplicate_params
except ValueError:
raise errors.InvalidRequestFatalError(description='Unable to parse query string', request=request)
if param in duplicate_params:
raise errors.InvalidRequestFatalError(description='Duplicate %s parameter.' % param, request=request)
# REQUIRED. The client identifier as described in Section 2.2.
# https://tools.ietf.org/html/rfc6749#section-2.2
if not request.client_id:
raise errors.MissingClientIdError(request=request)
if not self.request_validator.validate_client_id(request.client_id, request):
raise errors.InvalidClientIdError(request=request)
# OPTIONAL. As described in Section 3.1.2.
# https://tools.ietf.org/html/rfc6749#section-3.1.2
log.debug('Validating redirection uri %s for client %s.',
request.redirect_uri, request.client_id)
# OPTIONAL. As described in Section 3.1.2.
# https://tools.ietf.org/html/rfc6749#section-3.1.2
self._handle_redirects(request)
# Then check for normal errors.
# If the resource owner denies the access request or if the request
# fails for reasons other than a missing or invalid redirection URI,
# the authorization server informs the client by adding the following
# parameters to the query component of the redirection URI using the
# "application/x-www-form-urlencoded" format, per Appendix B.
# https://tools.ietf.org/html/rfc6749#appendix-B
# Note that the correct parameters to be added are automatically
# populated through the use of specific exceptions.
request_info = {}
for validator in self.custom_validators.pre_auth:
request_info.update(validator(request))
# REQUIRED.
if request.response_type is None:
raise errors.MissingResponseTypeError(request=request)
# Value MUST be set to "code" or one of the OpenID authorization code including
# response_types "code token", "code id_token", "code token id_token"
        elif 'code' not in request.response_type and request.response_type != 'none':
raise errors.UnsupportedResponseTypeError(request=request)
if not self.request_validator.validate_response_type(request.client_id,
request.response_type,
request.client, request):
log.debug('Client %s is not authorized to use response_type %s.',
request.client_id, request.response_type)
raise errors.UnauthorizedClientError(request=request)
# OPTIONAL. Validate PKCE request or reply with "error"/"invalid_request"
        # https://tools.ietf.org/html/rfc7636#section-4.4.1
if self.request_validator.is_pkce_required(request.client_id, request) is True:
if request.code_challenge is None:
raise errors.MissingCodeChallengeError(request=request)
if request.code_challenge is not None:
request_info["code_challenge"] = request.code_challenge
# OPTIONAL, defaults to "plain" if not present in the request.
if request.code_challenge_method is None:
request.code_challenge_method = "plain"
if request.code_challenge_method not in self._code_challenge_methods:
raise errors.UnsupportedCodeChallengeMethodError(request=request)
request_info["code_challenge_method"] = request.code_challenge_method
# OPTIONAL. The scope of the access request as described by Section 3.3
# https://tools.ietf.org/html/rfc6749#section-3.3
self.validate_scopes(request)
request_info.update({
'client_id': request.client_id,
'redirect_uri': request.redirect_uri,
'response_type': request.response_type,
'state': request.state,
'request': request
})
for validator in self.custom_validators.post_auth:
request_info.update(validator(request))
return request.scopes, request_info
def validate_token_request(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
# REQUIRED. Value MUST be set to "authorization_code".
if request.grant_type not in ('authorization_code', 'openid'):
raise errors.UnsupportedGrantTypeError(request=request)
for validator in self.custom_validators.pre_token:
validator(request)
if request.code is None:
raise errors.InvalidRequestError(
description='Missing code parameter.', request=request)
for param in ('client_id', 'grant_type', 'redirect_uri'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param,
request=request)
if self.request_validator.client_authentication_required(request):
# If the client type is confidential or the client was issued client
# credentials (or assigned other authentication requirements), the
# client MUST authenticate with the authorization server as described
# in Section 3.2.1.
# https://tools.ietf.org/html/rfc6749#section-3.2.1
if not self.request_validator.authenticate_client(request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
elif not self.request_validator.authenticate_client_id(request.client_id, request):
# REQUIRED, if the client is not authenticating with the
# authorization server as described in Section 3.2.1.
# https://tools.ietf.org/html/rfc6749#section-3.2.1
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
if not hasattr(request.client, 'client_id'):
raise NotImplementedError('Authenticate client must set the '
'request.client.client_id attribute '
'in authenticate_client.')
request.client_id = request.client_id or request.client.client_id
        # Ensure the client is authorized to use this grant type
self.validate_grant_type(request)
# REQUIRED. The authorization code received from the
# authorization server.
if not self.request_validator.validate_code(request.client_id,
request.code, request.client, request):
            log.debug('Client %r (%r) supplied an invalid authorization code, %r.',
                      request.client_id, request.client, request.code)
raise errors.InvalidGrantError(request=request)
# OPTIONAL. Validate PKCE code_verifier
challenge = self.request_validator.get_code_challenge(request.code, request)
if challenge is not None:
if request.code_verifier is None:
raise errors.MissingCodeVerifierError(request=request)
challenge_method = self.request_validator.get_code_challenge_method(request.code, request)
if challenge_method is None:
raise errors.InvalidGrantError(request=request, description="Challenge method not found")
if challenge_method not in self._code_challenge_methods:
raise errors.ServerError(
description="code_challenge_method {} is not supported.".format(challenge_method),
request=request
)
if not self.validate_code_challenge(challenge,
challenge_method,
request.code_verifier):
                log.debug('Request provided an invalid code_verifier.')
raise errors.InvalidGrantError(request=request)
elif self.request_validator.is_pkce_required(request.client_id, request) is True:
if request.code_verifier is None:
raise errors.MissingCodeVerifierError(request=request)
raise errors.InvalidGrantError(request=request, description="Challenge not found")
for attr in ('user', 'scopes'):
if getattr(request, attr, None) is None:
log.debug('request.%s was not set on code validation.', attr)
# REQUIRED, if the "redirect_uri" parameter was included in the
# authorization request as described in Section 4.1.1, and their
# values MUST be identical.
if request.redirect_uri is None:
request.using_default_redirect_uri = True
request.redirect_uri = self.request_validator.get_default_redirect_uri(
request.client_id, request)
log.debug('Using default redirect_uri %s.', request.redirect_uri)
if not request.redirect_uri:
raise errors.MissingRedirectURIError(request=request)
else:
request.using_default_redirect_uri = False
log.debug('Using provided redirect_uri %s', request.redirect_uri)
if not self.request_validator.confirm_redirect_uri(request.client_id, request.code,
request.redirect_uri, request.client,
request):
log.debug('Redirect_uri (%r) invalid for client %r (%r).',
request.redirect_uri, request.client_id, request.client)
raise errors.MismatchingRedirectURIError(request=request)
for validator in self.custom_validators.post_token:
validator(request)
def validate_code_challenge(self, challenge, challenge_method, verifier):
if challenge_method in self._code_challenge_methods:
return self._code_challenge_methods[challenge_method](verifier, challenge)
raise NotImplementedError('Unknown challenge_method %s' % challenge_method)
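
# --- Hedged usage sketch (added for illustration, not part of oauthlib) ---
# Redeeming an authorization code for tokens; `validator` is an assumed
# RequestValidator implementation, and the code and redirect_uri values are
# placeholders.
def _demo_code_exchange(validator):
    from oauthlib.common import Request
    from oauthlib.oauth2 import BearerToken

    grant = AuthorizationCodeGrant(validator)
    token_handler = BearerToken(validator)
    request = Request(
        'https://provider.example/token',
        http_method='POST',
        body='grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA'
             '&redirect_uri=https%3A%2F%2Fclient.example%2Fcb',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
    )
    # The grant validates the code (and PKCE verifier, if one was used),
    # then invalidates it so it cannot be replayed.
    return grant.create_token_response(request, token_handler)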
| 25,945 | Python | .py | 448 | 46.544643 | 125 | 0.628425 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,824 | base.py | rembo10_headphones/lib/oauthlib/oauth2/rfc6749/grant_types/base.py |
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import logging
from itertools import chain
from oauthlib.common import add_params_to_uri
from oauthlib.oauth2.rfc6749 import errors, utils
from oauthlib.uri_validate import is_absolute_uri
from ..request_validator import RequestValidator
log = logging.getLogger(__name__)
class ValidatorsContainer:
"""
Container object for holding custom validator callables to be invoked
as part of the grant type `validate_authorization_request()` or
    `validate_token_request()` methods on the various grant types.
Authorization validators must be callables that take a request object and
return a dict, which may contain items to be added to the `request_info`
returned from the grant_type after validation.
Token validators must be callables that take a request object and
return None.
Both authorization validators and token validators may raise OAuth2
exceptions if validation conditions fail.
Authorization validators added to `pre_auth` will be run BEFORE
the standard validations (but after the critical ones that raise
fatal errors) as part of `validate_authorization_request()`
Authorization validators added to `post_auth` will be run AFTER
the standard validations as part of `validate_authorization_request()`
Token validators added to `pre_token` will be run BEFORE
the standard validations as part of `validate_token_request()`
Token validators added to `post_token` will be run AFTER
the standard validations as part of `validate_token_request()`
For example:
>>> def my_auth_validator(request):
... return {'myval': True}
>>> auth_code_grant = AuthorizationCodeGrant(request_validator)
>>> auth_code_grant.custom_validators.pre_auth.append(my_auth_validator)
>>> def my_token_validator(request):
... if not request.everything_okay:
... raise errors.OAuth2Error("uh-oh")
>>> auth_code_grant.custom_validators.post_token.append(my_token_validator)
"""
def __init__(self, post_auth, post_token,
pre_auth, pre_token):
self.pre_auth = pre_auth
self.post_auth = post_auth
self.pre_token = pre_token
self.post_token = post_token
@property
def all_pre(self):
return chain(self.pre_auth, self.pre_token)
@property
def all_post(self):
return chain(self.post_auth, self.post_token)
class GrantTypeBase:
error_uri = None
request_validator = None
default_response_mode = 'fragment'
refresh_token = True
response_types = ['code']
def __init__(self, request_validator=None, **kwargs):
self.request_validator = request_validator or RequestValidator()
# Transforms class variables into instance variables:
self.response_types = self.response_types
self.refresh_token = self.refresh_token
self._setup_custom_validators(kwargs)
self._code_modifiers = []
self._token_modifiers = []
for kw, val in kwargs.items():
setattr(self, kw, val)
def _setup_custom_validators(self, kwargs):
post_auth = kwargs.get('post_auth', [])
post_token = kwargs.get('post_token', [])
pre_auth = kwargs.get('pre_auth', [])
pre_token = kwargs.get('pre_token', [])
if not hasattr(self, 'validate_authorization_request'):
if post_auth or pre_auth:
msg = ("{} does not support authorization validators. Use "
"token validators instead.").format(self.__class__.__name__)
raise ValueError(msg)
# Using tuples here because they can't be appended to:
post_auth, pre_auth = (), ()
self.custom_validators = ValidatorsContainer(post_auth, post_token,
pre_auth, pre_token)
def register_response_type(self, response_type):
self.response_types.append(response_type)
def register_code_modifier(self, modifier):
self._code_modifiers.append(modifier)
def register_token_modifier(self, modifier):
self._token_modifiers.append(modifier)
def create_authorization_response(self, request, token_handler):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
"""
raise NotImplementedError('Subclasses must implement this method.')
def create_token_response(self, request, token_handler):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
"""
raise NotImplementedError('Subclasses must implement this method.')
def add_token(self, token, token_handler, request):
"""
:param token:
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
# Only add a hybrid access token on auth step if asked for
        if request.response_type not in ["token", "code token", "id_token token", "code id_token token"]:
return token
token.update(token_handler.create_token(request, refresh_token=False))
return token
def validate_grant_type(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
client_id = getattr(request, 'client_id', None)
if not self.request_validator.validate_grant_type(client_id,
request.grant_type, request.client, request):
log.debug('Unauthorized from %r (%r) access to grant type %s.',
request.client_id, request.client, request.grant_type)
raise errors.UnauthorizedClientError(request=request)
def validate_scopes(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
if not request.scopes:
request.scopes = utils.scope_to_list(request.scope) or utils.scope_to_list(
self.request_validator.get_default_scopes(request.client_id, request))
log.debug('Validating access to scopes %r for client %r (%r).',
request.scopes, request.client_id, request.client)
if not self.request_validator.validate_scopes(request.client_id,
request.scopes, request.client, request):
raise errors.InvalidScopeError(request=request)
def prepare_authorization_response(self, request, token, headers, body, status):
"""Place token according to response mode.
Base classes can define a default response mode for their authorization
response by overriding the static `default_response_mode` member.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token:
:param headers:
:param body:
:param status:
"""
request.response_mode = request.response_mode or self.default_response_mode
if request.response_mode not in ('query', 'fragment'):
log.debug('Overriding invalid response mode %s with %s',
request.response_mode, self.default_response_mode)
request.response_mode = self.default_response_mode
token_items = token.items()
if request.response_type == 'none':
state = token.get('state', None)
if state:
token_items = [('state', state)]
else:
token_items = []
if request.response_mode == 'query':
headers['Location'] = add_params_to_uri(
request.redirect_uri, token_items, fragment=False)
return headers, body, status
if request.response_mode == 'fragment':
headers['Location'] = add_params_to_uri(
request.redirect_uri, token_items, fragment=True)
return headers, body, status
raise NotImplementedError(
'Subclasses must set a valid default_response_mode')
def _get_default_headers(self):
"""Create default headers for grant responses."""
return {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
}
def _handle_redirects(self, request):
if request.redirect_uri is not None:
request.using_default_redirect_uri = False
log.debug('Using provided redirect_uri %s', request.redirect_uri)
if not is_absolute_uri(request.redirect_uri):
raise errors.InvalidRedirectURIError(request=request)
# The authorization server MUST verify that the redirection URI
# to which it will redirect the access token matches a
# redirection URI registered by the client as described in
# Section 3.1.2.
# https://tools.ietf.org/html/rfc6749#section-3.1.2
if not self.request_validator.validate_redirect_uri(
request.client_id, request.redirect_uri, request):
raise errors.MismatchingRedirectURIError(request=request)
else:
request.redirect_uri = self.request_validator.get_default_redirect_uri(
request.client_id, request)
request.using_default_redirect_uri = True
log.debug('Using default redirect_uri %s.', request.redirect_uri)
if not request.redirect_uri:
raise errors.MissingRedirectURIError(request=request)
if not is_absolute_uri(request.redirect_uri):
raise errors.InvalidRedirectURIError(request=request)
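
# --- Hedged usage sketch (added for illustration, not part of oauthlib) ---
# Shows the two extension points defined above: custom validators and token
# modifiers. `grant` is any GrantTypeBase subclass instance; the policy and
# token field are invented for the example.
def _demo_customised_grant(grant):
    # A pre_token validator may veto a request by raising an OAuth2Error.
    def reject_blocklisted_clients(request):
        if request.client_id == 'blocked-client':  # hypothetical policy
            raise errors.InvalidClientError(request=request)

    grant.custom_validators.pre_token.append(reject_blocklisted_clients)

    # A token modifier can decorate the token dict before it is saved.
    # Note that some grant types call modifiers with extra arguments (the
    # authorization code grant passes token, token_handler, request),
    # hence the *args.
    def tag_token(token, *args):
        token['issued_by'] = 'demo-authorization-server'
        return token

    grant.register_token_modifier(tag_token)
    return grant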
| 10,213 | Python | .py | 206 | 39.368932 | 105 | 0.643381 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,825 | oauth1_auth.py | rembo10_headphones/lib/requests_oauthlib/oauth1_auth.py |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from oauthlib.common import extract_params
from oauthlib.oauth1 import Client, SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER
from oauthlib.oauth1 import SIGNATURE_TYPE_BODY
from requests.compat import is_py3
from requests.utils import to_native_string
from requests.auth import AuthBase
CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded"
CONTENT_TYPE_MULTI_PART = "multipart/form-data"
if is_py3:
unicode = str
log = logging.getLogger(__name__)
# OBS!: Correct signing of requests is conditional on invoking OAuth1
# as the last step of preparing a request, or at least having the
# content-type set properly.
class OAuth1(AuthBase):
"""Signs the request using OAuth 1 (RFC5849)"""
client_class = Client
def __init__(
self,
client_key,
client_secret=None,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=None,
signature_method=SIGNATURE_HMAC,
signature_type=SIGNATURE_TYPE_AUTH_HEADER,
rsa_key=None,
verifier=None,
decoding="utf-8",
client_class=None,
force_include_body=False,
**kwargs
):
try:
signature_type = signature_type.upper()
except AttributeError:
pass
client_class = client_class or self.client_class
self.force_include_body = force_include_body
self.client = client_class(
client_key,
client_secret,
resource_owner_key,
resource_owner_secret,
callback_uri,
signature_method,
signature_type,
rsa_key,
verifier,
decoding=decoding,
**kwargs
)
def __call__(self, r):
"""Add OAuth parameters to the request.
Parameters may be included from the body if the content-type is
        urlencoded; if no content type is set, a guess is made.
"""
# Overwriting url is safe here as request will not modify it past
# this point.
log.debug("Signing request %s using client %s", r, self.client)
content_type = r.headers.get("Content-Type", "")
if (
not content_type
and extract_params(r.body)
or self.client.signature_type == SIGNATURE_TYPE_BODY
):
content_type = CONTENT_TYPE_FORM_URLENCODED
if not isinstance(content_type, unicode):
content_type = content_type.decode("utf-8")
is_form_encoded = CONTENT_TYPE_FORM_URLENCODED in content_type
log.debug(
"Including body in call to sign: %s",
is_form_encoded or self.force_include_body,
)
if is_form_encoded:
r.headers["Content-Type"] = CONTENT_TYPE_FORM_URLENCODED
r.url, headers, r.body = self.client.sign(
unicode(r.url), unicode(r.method), r.body or "", r.headers
)
elif self.force_include_body:
# To allow custom clients to work on non form encoded bodies.
r.url, headers, r.body = self.client.sign(
unicode(r.url), unicode(r.method), r.body or "", r.headers
)
else:
# Omit body data in the signing of non form-encoded requests
r.url, headers, _ = self.client.sign(
unicode(r.url), unicode(r.method), None, r.headers
)
r.prepare_headers(headers)
r.url = to_native_string(r.url)
log.debug("Updated url: %s", r.url)
log.debug("Updated headers: %s", headers)
log.debug("Updated body: %r", r.body)
return r
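
# --- Hedged usage sketch (added for illustration, not part of the module) ---
# OAuth1 plugs into requests as a regular AuthBase; all credentials and the
# URL below are placeholders.
def _demo_oauth1_request():
    import requests

    auth = OAuth1(
        'client-key',
        client_secret='client-secret',
        resource_owner_key='access-token',
        resource_owner_secret='token-secret',
    )
    # The auth object signs the prepared request just before it is sent,
    # honouring the content-type caveat noted above.
    return requests.get('https://api.example.com/1/resource.json', auth=auth)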
| 3,737 | Python | .py | 98 | 29.142857 | 78 | 0.616575 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,826 | oauth2_auth.py | rembo10_headphones/lib/requests_oauthlib/oauth2_auth.py |
from __future__ import unicode_literals
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import is_secure_transport
from requests.auth import AuthBase
class OAuth2(AuthBase):
"""Adds proof of authorization (OAuth2 token) to the request."""
def __init__(self, client_id=None, client=None, token=None):
"""Construct a new OAuth 2 authorization object.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param token: Token dictionary, must include access_token
and token_type.
"""
self._client = client or WebApplicationClient(client_id, token=token)
if token:
for k, v in token.items():
setattr(self._client, k, v)
def __call__(self, r):
"""Append an OAuth 2 token to the request.
Note that currently HTTPS is required for all requests. There may be
        a token type that allows for plain HTTP in the future, and then this
        should be updated to allow plain HTTP on a whitelist basis.
"""
if not is_secure_transport(r.url):
raise InsecureTransportError()
r.url, r.headers, r.body = self._client.add_token(
r.url, http_method=r.method, body=r.body, headers=r.headers
)
return r
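
# --- Hedged usage sketch (added for illustration, not part of the module) ---
# Attaching a previously obtained bearer token to outgoing requests.
# `token` is assumed to be a dict with at least access_token and token_type;
# the client id and URL are placeholders.
def _demo_oauth2_request(token):
    import requests

    auth = OAuth2(client_id='client-id', token=token)
    # HTTPS is mandatory; plain HTTP raises InsecureTransportError.
    return requests.get('https://api.example.com/v1/me', auth=auth)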
| 1,548 | Python | .py | 31 | 40.354839 | 77 | 0.655195 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,827 | __init__.py | rembo10_headphones/lib/requests_oauthlib/__init__.py |
import logging
from .oauth1_auth import OAuth1
from .oauth1_session import OAuth1Session
from .oauth2_auth import OAuth2
from .oauth2_session import OAuth2Session, TokenUpdated
__version__ = "1.3.0"
import requests
if requests.__version__ < "2.0.0":
msg = (
"You are using requests version %s, which is older than "
"requests-oauthlib expects, please upgrade to 2.0.0 or later."
)
raise Warning(msg % requests.__version__)
logging.getLogger("requests_oauthlib").addHandler(logging.NullHandler())
| 529 | Python | .py | 14 | 34.428571 | 72 | 0.739216 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,828 | oauth1_session.py | rembo10_headphones/lib/requests_oauthlib/oauth1_session.py |
from __future__ import unicode_literals
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import logging
from oauthlib.common import add_params_to_uri
from oauthlib.common import urldecode as _urldecode
from oauthlib.oauth1 import SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER
import requests
from . import OAuth1
log = logging.getLogger(__name__)
def urldecode(body):
"""Parse query or json to python dictionary"""
try:
return _urldecode(body)
except Exception:
import json
return json.loads(body)
class TokenRequestDenied(ValueError):
def __init__(self, message, response):
super(TokenRequestDenied, self).__init__(message)
self.response = response
@property
def status_code(self):
"""For backwards-compatibility purposes"""
return self.response.status_code
class TokenMissing(ValueError):
def __init__(self, message, response):
super(TokenMissing, self).__init__(message)
self.response = response
class VerifierMissing(ValueError):
pass
class OAuth1Session(requests.Session):
"""Request signing and convenience methods for the oauth dance.
What is the difference between OAuth1Session and OAuth1?
OAuth1Session actually uses OAuth1 internally and its purpose is to assist
in the OAuth workflow through convenience methods to prepare authorization
    URLs and parse the various token and redirection responses. It also provides
rudimentary validation of responses.
An example of the OAuth workflow using a basic CLI app and Twitter.
>>> # Credentials obtained during the registration.
>>> client_key = 'client key'
>>> client_secret = 'secret'
>>> callback_uri = 'https://127.0.0.1/callback'
>>>
>>> # Endpoints found in the OAuth provider API documentation
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> authorization_url = 'https://api.twitter.com/oauth/authorize'
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>>
    >>> oauth_session = OAuth1Session(client_key, client_secret=client_secret, callback_uri=callback_uri)
>>>
>>> # First step, fetch the request token.
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'kjerht2309u',
'oauth_token_secret': 'lsdajfh923874',
}
>>>
>>> # Second step. Follow this link and authorize
>>> oauth_session.authorization_url(authorization_url)
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
>>>
>>> # Third step. Fetch the access token
>>> redirect_response = raw_input('Paste the full redirect URL here.')
>>> oauth_session.parse_authorization_response(redirect_response)
{
        'oauth_token': 'kjerht2309u',
        'oauth_token_secret': 'lsdajfh923874',
        'oauth_verifier': 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
>>> # Done. You can now make OAuth requests.
>>> status_url = 'http://api.twitter.com/1/statuses/update.json'
>>> new_status = {'status': 'hello world!'}
>>> oauth_session.post(status_url, data=new_status)
<Response [200]>
"""
def __init__(
self,
client_key,
client_secret=None,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=None,
signature_method=SIGNATURE_HMAC,
signature_type=SIGNATURE_TYPE_AUTH_HEADER,
rsa_key=None,
verifier=None,
client_class=None,
force_include_body=False,
**kwargs
):
"""Construct the OAuth 1 session.
:param client_key: A client specific identifier.
:param client_secret: A client specific secret used to create HMAC and
plaintext signatures.
:param resource_owner_key: A resource owner key, also referred to as
request token or access token depending on
when in the workflow it is used.
:param resource_owner_secret: A resource owner secret obtained with
either a request or access token. Often
referred to as token secret.
        :param callback_uri: The URL the user is redirected back to after
authorization.
:param signature_method: Signature methods determine how the OAuth
signature is created. The three options are
oauthlib.oauth1.SIGNATURE_HMAC (default),
oauthlib.oauth1.SIGNATURE_RSA and
oauthlib.oauth1.SIGNATURE_PLAIN.
:param signature_type: Signature type decides where the OAuth
parameters are added. Either in the
Authorization header (default) or to the URL
query parameters or the request body. Defined as
oauthlib.oauth1.SIGNATURE_TYPE_AUTH_HEADER,
oauthlib.oauth1.SIGNATURE_TYPE_QUERY and
oauthlib.oauth1.SIGNATURE_TYPE_BODY
respectively.
:param rsa_key: The private RSA key as a string. Can only be used with
signature_method=oauthlib.oauth1.SIGNATURE_RSA.
:param verifier: A verifier string to prove authorization was granted.
:param client_class: A subclass of `oauthlib.oauth1.Client` to use with
`requests_oauthlib.OAuth1` instead of the default
:param force_include_body: Always include the request body in the
signature creation.
:param **kwargs: Additional keyword arguments passed to `OAuth1`
"""
super(OAuth1Session, self).__init__()
self._client = OAuth1(
client_key,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
callback_uri=callback_uri,
signature_method=signature_method,
signature_type=signature_type,
rsa_key=rsa_key,
verifier=verifier,
client_class=client_class,
force_include_body=force_include_body,
**kwargs
)
self.auth = self._client
@property
def token(self):
oauth_token = self._client.client.resource_owner_key
oauth_token_secret = self._client.client.resource_owner_secret
oauth_verifier = self._client.client.verifier
token_dict = {}
if oauth_token:
token_dict["oauth_token"] = oauth_token
if oauth_token_secret:
token_dict["oauth_token_secret"] = oauth_token_secret
if oauth_verifier:
token_dict["oauth_verifier"] = oauth_verifier
return token_dict
@token.setter
def token(self, value):
self._populate_attributes(value)
@property
def authorized(self):
"""Boolean that indicates whether this session has an OAuth token
or not. If `self.authorized` is True, you can reasonably expect
OAuth-protected requests to the resource to succeed. If
`self.authorized` is False, you need the user to go through the OAuth
authentication dance before OAuth-protected requests to the resource
will succeed.
"""
if self._client.client.signature_method == SIGNATURE_RSA:
# RSA only uses resource_owner_key
return bool(self._client.client.resource_owner_key)
else:
# other methods of authentication use all three pieces
return (
bool(self._client.client.client_secret)
and bool(self._client.client.resource_owner_key)
and bool(self._client.client.resource_owner_secret)
)
def authorization_url(self, url, request_token=None, **kwargs):
"""Create an authorization URL by appending request_token and optional
kwargs to url.
This is the second step in the OAuth 1 workflow. The user should be
redirected to this authorization URL, grant access to you, and then
be redirected back to you. The redirection back can either be specified
during client registration or by supplying a callback URI per request.
:param url: The authorization endpoint URL.
:param request_token: The previously obtained request token.
:param kwargs: Optional parameters to append to the URL.
:returns: The authorization URL with new parameters embedded.
An example using a registered default callback URI.
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> authorization_url = 'https://api.twitter.com/oauth/authorize'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
>>> oauth_session.authorization_url(authorization_url)
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf'
>>> oauth_session.authorization_url(authorization_url, foo='bar')
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar'
An example using an explicit callback URI.
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> authorization_url = 'https://api.twitter.com/oauth/authorize'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback')
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
>>> oauth_session.authorization_url(authorization_url)
'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
"""
kwargs["oauth_token"] = request_token or self._client.client.resource_owner_key
log.debug("Adding parameters %s to url %s", kwargs, url)
return add_params_to_uri(url, kwargs.items())
def fetch_request_token(self, url, realm=None, **request_kwargs):
r"""Fetch a request token.
This is the first step in the OAuth 1 workflow. A request token is
obtained by making a signed post request to url. The token is then
parsed from the application/x-www-form-urlencoded response and ready
to be used to construct an authorization url.
:param url: The request token endpoint URL.
:param realm: A list of realms to request access to.
:param \*\*request_kwargs: Optional arguments passed to ''post''
function in ''requests.Session''
:returns: The response in dict format.
Note that a previously set callback_uri will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> request_token_url = 'https://api.twitter.com/oauth/request_token'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.fetch_request_token(request_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
"""
self._client.client.realm = " ".join(realm) if realm else None
token = self._fetch_token(url, **request_kwargs)
log.debug("Resetting callback_uri and realm (not needed in next phase).")
self._client.client.callback_uri = None
self._client.client.realm = None
return token
def fetch_access_token(self, url, verifier=None, **request_kwargs):
"""Fetch an access token.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
Note that a previously set verifier will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
            'oauth_token': 'kjerht2309u',
            'oauth_token_secret': 'lsdajfh923874',
            'oauth_verifier': 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
"""
if verifier:
self._client.client.verifier = verifier
if not getattr(self._client.client, "verifier", None):
raise VerifierMissing("No client verifier has been set.")
token = self._fetch_token(url, **request_kwargs)
log.debug("Resetting verifier attribute, should not be used anymore.")
self._client.client.verifier = None
return token
def parse_authorization_response(self, url):
"""Extract parameters from the post authorization redirect response URL.
:param url: The full URL that resulted from the user being redirected
back from the OAuth provider to you, the client.
:returns: A dict of parameters extracted from the URL.
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
            'oauth_token': 'kjerht2309u',
            'oauth_token_secret': 'lsdajfh923874',
            'oauth_verifier': 'w34o8967345',
}
"""
log.debug("Parsing token from query part of url %s", url)
token = dict(urldecode(urlparse(url).query))
log.debug("Updating internal client token attribute.")
self._populate_attributes(token)
self.token = token
return token
def _populate_attributes(self, token):
if "oauth_token" in token:
self._client.client.resource_owner_key = token["oauth_token"]
else:
raise TokenMissing(
"Response does not contain a token: {resp}".format(resp=token), token
)
if "oauth_token_secret" in token:
self._client.client.resource_owner_secret = token["oauth_token_secret"]
if "oauth_verifier" in token:
self._client.client.verifier = token["oauth_verifier"]
def _fetch_token(self, url, **request_kwargs):
log.debug("Fetching token from %s using client %s", url, self._client.client)
r = self.post(url, **request_kwargs)
if r.status_code >= 400:
error = "Token request failed with code %s, response was '%s'."
raise TokenRequestDenied(error % (r.status_code, r.text), r)
log.debug('Decoding token from response "%s"', r.text)
try:
token = dict(urldecode(r.text.strip()))
except ValueError as e:
            error = (
                "Unable to decode token from token response. "
                "This is commonly caused by an unsuccessful request where "
                "a non-urlencoded error message is returned. "
                "The decoding error was %s" % e
            )
            raise ValueError(error)
log.debug("Obtained token %s", token)
log.debug("Updating internal client attributes from token data.")
self._populate_attributes(token)
self.token = token
return token
def rebuild_auth(self, prepared_request, response):
"""
When being redirected we should always strip Authorization
header, since nonce may not be reused as per OAuth spec.
"""
if "Authorization" in prepared_request.headers:
# If we get redirected to a new host, we should strip out
# any authentication headers.
prepared_request.headers.pop("Authorization", True)
prepared_request.prepare_auth(self.auth)
return
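# A minimal end-to-end sketch (not part of the original module) tying the
# methods above together into the three-legged flow. All endpoint URLs,
# keys and the redirect response below are placeholders.
#
#     oauth = OAuth1Session('client-key', client_secret='secret',
#                           callback_uri='https://127.0.0.1/callback')
#     oauth.fetch_request_token('https://example.com/oauth/request_token')
#     print(oauth.authorization_url('https://example.com/oauth/authorize'))
#     # ...the user authorizes, the provider redirects to callback_uri...
#     oauth.parse_authorization_response(
#         'https://127.0.0.1/callback?oauth_token=...&oauth_verifier=...')
#     oauth.fetch_access_token('https://example.com/oauth/access_token')
#     r = oauth.get('https://example.com/api/me')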
| 17,051
|
Python
|
.py
| 346
| 39.020231
| 145
| 0.641103
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,829
|
oauth2_session.py
|
rembo10_headphones/lib/requests_oauthlib/oauth2_session.py
|
from __future__ import unicode_literals
import logging
from oauthlib.common import generate_token, urldecode
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import LegacyApplicationClient
from oauthlib.oauth2 import TokenExpiredError, is_secure_transport
import requests
log = logging.getLogger(__name__)
class TokenUpdated(Warning):
def __init__(self, token):
super(TokenUpdated, self).__init__()
self.token = token
class OAuth2Session(requests.Session):
"""Versatile OAuth 2 extension to :class:`requests.Session`.
Supports any grant type adhering to :class:`oauthlib.oauth2.Client` spec
including the four core OAuth 2 grants.
Can be used to create authorization urls, fetch tokens and access protected
resources using the :class:`requests.Session` interface you are used to.
- :class:`oauthlib.oauth2.WebApplicationClient` (default): Authorization Code Grant
- :class:`oauthlib.oauth2.MobileApplicationClient`: Implicit Grant
- :class:`oauthlib.oauth2.LegacyApplicationClient`: Password Credentials Grant
- :class:`oauthlib.oauth2.BackendApplicationClient`: Client Credentials Grant
    Note that the only time you will be using the Implicit Grant from Python
    is if you are driving a user agent able to obtain URL fragments.
"""
def __init__(
self,
client_id=None,
client=None,
auto_refresh_url=None,
auto_refresh_kwargs=None,
scope=None,
redirect_uri=None,
token=None,
state=None,
token_updater=None,
**kwargs
):
"""Construct a new OAuth 2 client session.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param scope: List of scopes you wish to request access to
:param redirect_uri: Redirect URI you registered as callback
:param token: Token dictionary, must include access_token
and token_type.
:param state: State string used to prevent CSRF. This will be given
when creating the authorization url and must be supplied
when parsing the authorization response.
Can be either a string or a no argument callable.
        :param auto_refresh_url: Refresh token endpoint URL, must be HTTPS.
                            Supply this if you wish the client to automatically
                            refresh your access tokens.
        :param auto_refresh_kwargs: Extra arguments to pass to the refresh token
                            endpoint.
        :param token_updater: Method with one argument, token, to be used to
                            update your token database on automatic token
                            refresh. If not set a TokenUpdated warning will be
                            raised when a token has been refreshed. This warning
                            will carry the token in its token argument.
:param kwargs: Arguments to pass to the Session constructor.
"""
super(OAuth2Session, self).__init__(**kwargs)
self._client = client or WebApplicationClient(client_id, token=token)
self.token = token or {}
self.scope = scope
self.redirect_uri = redirect_uri
self.state = state or generate_token
self._state = state
self.auto_refresh_url = auto_refresh_url
self.auto_refresh_kwargs = auto_refresh_kwargs or {}
self.token_updater = token_updater
# Ensure that requests doesn't do any automatic auth. See #278.
# The default behavior can be re-enabled by setting auth to None.
self.auth = lambda r: r
# Allow customizations for non compliant providers through various
# hooks to adjust requests and responses.
self.compliance_hook = {
"access_token_response": set(),
"refresh_token_response": set(),
"protected_request": set(),
}
def new_state(self):
"""Generates a state string to be used in authorizations."""
try:
self._state = self.state()
log.debug("Generated new state %s.", self._state)
except TypeError:
self._state = self.state
log.debug("Re-using previously supplied state %s.", self._state)
return self._state
@property
def client_id(self):
return getattr(self._client, "client_id", None)
@client_id.setter
def client_id(self, value):
self._client.client_id = value
@client_id.deleter
def client_id(self):
del self._client.client_id
@property
def token(self):
return getattr(self._client, "token", None)
@token.setter
def token(self, value):
self._client.token = value
self._client.populate_token_attributes(value)
@property
def access_token(self):
return getattr(self._client, "access_token", None)
@access_token.setter
def access_token(self, value):
self._client.access_token = value
@access_token.deleter
def access_token(self):
del self._client.access_token
@property
def authorized(self):
"""Boolean that indicates whether this session has an OAuth token
or not. If `self.authorized` is True, you can reasonably expect
OAuth-protected requests to the resource to succeed. If
`self.authorized` is False, you need the user to go through the OAuth
authentication dance before OAuth-protected requests to the resource
will succeed.
"""
return bool(self.access_token)
def authorization_url(self, url, state=None, **kwargs):
"""Form an authorization URL.
:param url: Authorization endpoint url, must be HTTPS.
:param state: An optional state string for CSRF protection. If not
given it will be generated for you.
:param kwargs: Extra parameters to include.
:return: authorization_url, state
"""
state = state or self.new_state()
return (
self._client.prepare_request_uri(
url,
redirect_uri=self.redirect_uri,
scope=self.scope,
state=state,
**kwargs
),
state,
)
def fetch_token(
self,
token_url,
code=None,
authorization_response=None,
body="",
auth=None,
username=None,
password=None,
method="POST",
force_querystring=False,
timeout=None,
headers=None,
verify=True,
proxies=None,
include_client_id=None,
client_secret=None,
**kwargs
):
"""Generic method for fetching an access token from the token endpoint.
If you are using the MobileApplicationClient you will want to use
`token_from_fragment` instead of `fetch_token`.
The current implementation enforces the RFC guidelines.
:param token_url: Token endpoint URL, must use HTTPS.
:param code: Authorization code (used by WebApplicationClients).
:param authorization_response: Authorization response URL, the callback
URL of the request back to you. Used by
WebApplicationClients instead of code.
        :param body: Optional application/x-www-form-urlencoded body to include
                     in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by `requests`.
:param username: Username required by LegacyApplicationClients to appear
in the request body.
:param password: Password required by LegacyApplicationClients to appear
in the request body.
:param method: The HTTP method used to make the request. Defaults
to POST, but may also be GET. Other methods should
be added as needed.
:param force_querystring: If True, force the request body to be sent
in the querystring instead.
:param timeout: Timeout of the request in seconds.
:param headers: Dict to default request headers with.
:param verify: Verify SSL certificate.
:param proxies: The `proxies` argument is passed onto `requests`.
:param include_client_id: Should the request body include the
`client_id` parameter. Default is `None`,
which will attempt to autodetect. This can be
forced to always include (True) or never
include (False).
:param client_secret: The `client_secret` paired to the `client_id`.
This is generally required unless provided in the
`auth` tuple. If the value is `None`, it will be
omitted from the request, however if the value is
an empty string, an empty string will be sent.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
if not code and authorization_response:
self._client.parse_request_uri_response(
authorization_response, state=self._state
)
code = self._client.code
elif not code and isinstance(self._client, WebApplicationClient):
code = self._client.code
if not code:
raise ValueError(
"Please supply either code or " "authorization_response parameters."
)
# Earlier versions of this library build an HTTPBasicAuth header out of
# `username` and `password`. The RFC states, however these attributes
# must be in the request body and not the header.
# If an upstream server is not spec compliant and requires them to
# appear as an Authorization header, supply an explicit `auth` header
# to this function.
# This check will allow for empty strings, but not `None`.
#
# References
# 4.3.2 - Resource Owner Password Credentials Grant
# https://tools.ietf.org/html/rfc6749#section-4.3.2
if isinstance(self._client, LegacyApplicationClient):
if username is None:
raise ValueError(
"`LegacyApplicationClient` requires both the "
"`username` and `password` parameters."
)
if password is None:
raise ValueError(
"The required parameter `username` was supplied, "
"but `password` was not."
)
# merge username and password into kwargs for `prepare_request_body`
if username is not None:
kwargs["username"] = username
if password is not None:
kwargs["password"] = password
# is an auth explicitly supplied?
if auth is not None:
# if we're dealing with the default of `include_client_id` (None):
# we will assume the `auth` argument is for an RFC compliant server
# and we should not send the `client_id` in the body.
# This approach allows us to still force the client_id by submitting
# `include_client_id=True` along with an `auth` object.
if include_client_id is None:
include_client_id = False
# otherwise we may need to create an auth header
else:
# since we don't have an auth header, we MAY need to create one
# it is possible that we want to send the `client_id` in the body
# if so, `include_client_id` should be set to True
# otherwise, we will generate an auth header
if include_client_id is not True:
client_id = self.client_id
if client_id:
log.debug(
'Encoding `client_id` "%s" with `client_secret` '
"as Basic auth credentials.",
client_id,
)
client_secret = client_secret if client_secret is not None else ""
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
if include_client_id:
# this was pulled out of the params
# it needs to be passed into prepare_request_body
if client_secret is not None:
kwargs["client_secret"] = client_secret
body = self._client.prepare_request_body(
code=code,
body=body,
redirect_uri=self.redirect_uri,
include_client_id=include_client_id,
**kwargs
)
headers = headers or {
"Accept": "application/json",
"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
}
self.token = {}
request_kwargs = {}
if method.upper() == "POST":
request_kwargs["params" if force_querystring else "data"] = dict(
urldecode(body)
)
elif method.upper() == "GET":
request_kwargs["params"] = dict(urldecode(body))
else:
raise ValueError("The method kwarg must be POST or GET.")
r = self.request(
method=method,
url=token_url,
timeout=timeout,
headers=headers,
auth=auth,
verify=verify,
proxies=proxies,
**request_kwargs
)
log.debug("Request to fetch token completed with status %s.", r.status_code)
log.debug("Request url was %s", r.request.url)
log.debug("Request headers were %s", r.request.headers)
log.debug("Request body was %s", r.request.body)
log.debug("Response headers were %s and content %s.", r.headers, r.text)
log.debug(
"Invoking %d token response hooks.",
len(self.compliance_hook["access_token_response"]),
)
for hook in self.compliance_hook["access_token_response"]:
log.debug("Invoking hook %s.", hook)
r = hook(r)
self._client.parse_request_body_response(r.text, scope=self.scope)
self.token = self._client.token
log.debug("Obtained token %s.", self.token)
return self.token
def token_from_fragment(self, authorization_response):
"""Parse token from the URI fragment, used by MobileApplicationClients.
:param authorization_response: The full URL of the redirect back to you
:return: A token dict
"""
self._client.parse_request_uri_response(
authorization_response, state=self._state
)
self.token = self._client.token
return self.token
def refresh_token(
self,
token_url,
refresh_token=None,
body="",
auth=None,
timeout=None,
headers=None,
verify=True,
proxies=None,
**kwargs
):
"""Fetch a new access token using a refresh token.
:param token_url: The token endpoint, must be HTTPS.
:param refresh_token: The refresh_token to use.
        :param body: Optional application/x-www-form-urlencoded body to include
                     in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by `requests`.
:param timeout: Timeout of the request in seconds.
:param headers: A dict of headers to be used by `requests`.
:param verify: Verify SSL certificate.
:param proxies: The `proxies` argument will be passed to `requests`.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not token_url:
raise ValueError("No token endpoint set for auto_refresh.")
if not is_secure_transport(token_url):
raise InsecureTransportError()
refresh_token = refresh_token or self.token.get("refresh_token")
log.debug(
"Adding auto refresh key word arguments %s.", self.auto_refresh_kwargs
)
kwargs.update(self.auto_refresh_kwargs)
body = self._client.prepare_refresh_body(
body=body, refresh_token=refresh_token, scope=self.scope, **kwargs
)
log.debug("Prepared refresh token request body %s", body)
if headers is None:
headers = {
"Accept": "application/json",
"Content-Type": ("application/x-www-form-urlencoded;charset=UTF-8"),
}
r = self.post(
token_url,
data=dict(urldecode(body)),
auth=auth,
timeout=timeout,
headers=headers,
verify=verify,
withhold_token=True,
proxies=proxies,
)
log.debug("Request to refresh token completed with status %s.", r.status_code)
log.debug("Response headers were %s and content %s.", r.headers, r.text)
log.debug(
"Invoking %d token response hooks.",
len(self.compliance_hook["refresh_token_response"]),
)
for hook in self.compliance_hook["refresh_token_response"]:
log.debug("Invoking hook %s.", hook)
r = hook(r)
self.token = self._client.parse_request_body_response(r.text, scope=self.scope)
if not "refresh_token" in self.token:
log.debug("No new refresh token given. Re-using old.")
self.token["refresh_token"] = refresh_token
return self.token
def request(
self,
method,
url,
data=None,
headers=None,
withhold_token=False,
client_id=None,
client_secret=None,
**kwargs
):
"""Intercept all requests and add the OAuth 2 token if present."""
if not is_secure_transport(url):
raise InsecureTransportError()
if self.token and not withhold_token:
log.debug(
"Invoking %d protected resource request hooks.",
len(self.compliance_hook["protected_request"]),
)
for hook in self.compliance_hook["protected_request"]:
log.debug("Invoking hook %s.", hook)
url, headers, data = hook(url, headers, data)
log.debug("Adding token %s to request.", self.token)
try:
url, headers, data = self._client.add_token(
url, http_method=method, body=data, headers=headers
)
# Attempt to retrieve and save new access token if expired
except TokenExpiredError:
if self.auto_refresh_url:
log.debug(
"Auto refresh is set, attempting to refresh at %s.",
self.auto_refresh_url,
)
# We mustn't pass auth twice.
auth = kwargs.pop("auth", None)
if client_id and client_secret and (auth is None):
log.debug(
'Encoding client_id "%s" with client_secret as Basic auth credentials.',
client_id,
)
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
token = self.refresh_token(
self.auto_refresh_url, auth=auth, **kwargs
)
if self.token_updater:
log.debug(
"Updating token to %s using %s.", token, self.token_updater
)
self.token_updater(token)
url, headers, data = self._client.add_token(
url, http_method=method, body=data, headers=headers
)
else:
raise TokenUpdated(token)
else:
raise
log.debug("Requesting url %s using method %s.", url, method)
log.debug("Supplying headers %s and data %s", headers, data)
log.debug("Passing through key word arguments %s.", kwargs)
return super(OAuth2Session, self).request(
method, url, headers=headers, data=data, **kwargs
)
def register_compliance_hook(self, hook_type, hook):
"""Register a hook for request/response tweaking.
Available hooks are:
access_token_response invoked before token parsing.
refresh_token_response invoked before refresh token parsing.
protected_request invoked before making a request.
If you find a new hook is needed please send a GitHub PR request
or open an issue.
"""
if hook_type not in self.compliance_hook:
            raise ValueError(
                "Hook type %s is not in %s." % (hook_type, self.compliance_hook)
            )
self.compliance_hook[hook_type].add(hook)
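# A minimal usage sketch (not in the original module) of the Authorization
# Code flow with automatic refresh. Every URL and credential below is a
# placeholder.
#
#     oauth = OAuth2Session(
#         'client-id',
#         redirect_uri='https://127.0.0.1/callback',
#         scope=['profile'],
#         auto_refresh_url='https://example.com/oauth/token',
#         token_updater=lambda token: print('persist', token),
#     )
#     url, state = oauth.authorization_url('https://example.com/oauth/authorize')
#     # ...send the user to `url`, then capture the redirect back...
#     token = oauth.fetch_token(
#         'https://example.com/oauth/token',
#         authorization_response='https://127.0.0.1/callback?code=...&state=...',
#         client_secret='secret',
#     )
#     r = oauth.get('https://example.com/api/resource')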
| 21,638
|
Python
|
.py
| 474
| 33.417722
| 100
| 0.592163
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,830
|
slack.py
|
rembo10_headphones/lib/requests_oauthlib/compliance_fixes/slack.py
|
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
from oauthlib.common import add_params_to_uri
def slack_compliance_fix(session):
def _non_compliant_param_name(url, headers, data):
# If the user has already specified the token, either in the URL
# or in a data dictionary, then there's nothing to do.
# If the specified token is different from ``session.access_token``,
# we assume the user intends to override the access token.
url_query = dict(parse_qs(urlparse(url).query))
token = url_query.get("token")
if not token and isinstance(data, dict):
token = data.get("token")
if token:
# Nothing to do, just return.
return url, headers, data
if not data:
data = {"token": session.access_token}
elif isinstance(data, dict):
data["token"] = session.access_token
else:
# ``data`` is something other than a dict: maybe a stream,
# maybe a file object, maybe something else. We can't easily
# modify it, so we'll set the token by modifying the URL instead.
token = [("token", session.access_token)]
url = add_params_to_uri(url, token)
return url, headers, data
session.register_compliance_hook("protected_request", _non_compliant_param_name)
return session
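# Hypothetical usage sketch: wrap an OAuth2Session so requests to Slack's
# Web API carry the token as a ``token`` form field, as the hook above does.
# The token dict below is a placeholder.
#
#     from requests_oauthlib import OAuth2Session
#     session = slack_compliance_fix(
#         OAuth2Session('client-id', token={'access_token': 'xoxp-...',
#                                           'token_type': 'Bearer'}))
#     session.post('https://slack.com/api/chat.postMessage',
#                  data={'channel': '#general', 'text': 'hi'})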
| 1,453
|
Python
|
.py
| 31
| 38.064516
| 84
| 0.642655
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,831
|
weibo.py
|
rembo10_headphones/lib/requests_oauthlib/compliance_fixes/weibo.py
|
from json import loads, dumps
from oauthlib.common import to_unicode
def weibo_compliance_fix(session):
def _missing_token_type(r):
token = loads(r.text)
token["token_type"] = "Bearer"
r._content = to_unicode(dumps(token)).encode("UTF-8")
return r
session._client.default_token_placement = "query"
session.register_compliance_hook("access_token_response", _missing_token_type)
return session
| 444
|
Python
|
.py
| 11
| 34.636364
| 82
| 0.699301
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,832
|
instagram.py
|
rembo10_headphones/lib/requests_oauthlib/compliance_fixes/instagram.py
|
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
from oauthlib.common import add_params_to_uri
def instagram_compliance_fix(session):
def _non_compliant_param_name(url, headers, data):
# If the user has already specified the token in the URL
# then there's nothing to do.
# If the specified token is different from ``session.access_token``,
# we assume the user intends to override the access token.
url_query = dict(parse_qs(urlparse(url).query))
token = url_query.get("access_token")
if token:
# Nothing to do, just return.
return url, headers, data
token = [("access_token", session.access_token)]
url = add_params_to_uri(url, token)
return url, headers, data
session.register_compliance_hook("protected_request", _non_compliant_param_name)
return session
| 948
|
Python
|
.py
| 21
| 38
| 84
| 0.681128
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,833
|
plentymarkets.py
|
rembo10_headphones/lib/requests_oauthlib/compliance_fixes/plentymarkets.py
|
from json import dumps, loads
import re
from oauthlib.common import to_unicode
def plentymarkets_compliance_fix(session):
def _to_snake_case(n):
return re.sub("(.)([A-Z][a-z]+)", r"\1_\2", n).lower()
def _compliance_fix(r):
# Plenty returns the Token in CamelCase instead of _
if (
"application/json" in r.headers.get("content-type", {})
and r.status_code == 200
):
token = loads(r.text)
else:
return r
fixed_token = {}
for k, v in token.items():
fixed_token[_to_snake_case(k)] = v
r._content = to_unicode(dumps(fixed_token)).encode("UTF-8")
return r
session.register_compliance_hook("access_token_response", _compliance_fix)
return session
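# Illustrative behaviour of ``_to_snake_case`` (assumed inputs, not from
# the original source):
#     _to_snake_case("accessToken")   -> "access_token"
#     _to_snake_case("refreshToken")  -> "refresh_token"
# so a CamelCase token body is rewritten into the snake_case keys that
# oauthlib expects before parsing.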
| 796
|
Python
|
.py
| 22
| 28.136364
| 78
| 0.59322
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,834
|
facebook.py
|
rembo10_headphones/lib/requests_oauthlib/compliance_fixes/facebook.py
|
from json import dumps
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl
from oauthlib.common import to_unicode
def facebook_compliance_fix(session):
def _compliance_fix(r):
# if Facebook claims to be sending us json, let's trust them.
if "application/json" in r.headers.get("content-type", {}):
return r
# Facebook returns a content-type of text/plain when sending their
# x-www-form-urlencoded responses, along with a 200. If not, let's
# assume we're getting JSON and bail on the fix.
if "text/plain" in r.headers.get("content-type", {}) and r.status_code == 200:
token = dict(parse_qsl(r.text, keep_blank_values=True))
else:
return r
expires = token.get("expires")
if expires is not None:
token["expires_in"] = expires
token["token_type"] = "Bearer"
r._content = to_unicode(dumps(token)).encode("UTF-8")
return r
session.register_compliance_hook("access_token_response", _compliance_fix)
return session
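# Illustrative transformation (assumed response body): Facebook's legacy
# text/plain reply ``access_token=abc&expires=3600`` becomes the JSON
# ``{"access_token": "abc", "expires": "3600", "expires_in": "3600",
# "token_type": "Bearer"}`` that oauthlib's parser understands.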
| 1,119
|
Python
|
.py
| 26
| 35.461538
| 86
| 0.654696
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,835
|
fitbit.py
|
rembo10_headphones/lib/requests_oauthlib/compliance_fixes/fitbit.py
|
"""
The Fitbit API breaks from the OAuth2 RFC standard by returning an "errors"
object list, rather than a single "error" string. This puts hooks in place so
that oauthlib can process an error in the results from access token and refresh
token responses. This is necessary to prevent getting the generic red herring
MissingTokenError.
"""
from json import loads, dumps
from oauthlib.common import to_unicode
def fitbit_compliance_fix(session):
def _missing_error(r):
token = loads(r.text)
if "errors" in token:
# Set the error to the first one we have
token["error"] = token["errors"][0]["errorType"]
r._content = to_unicode(dumps(token)).encode("UTF-8")
return r
session.register_compliance_hook("access_token_response", _missing_error)
session.register_compliance_hook("refresh_token_response", _missing_error)
return session
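# Illustrative transformation (assumed response body): an error reply such
# as ``{"errors": [{"errorType": "invalid_grant", ...}]}`` gains a top-level
# ``"error": "invalid_grant"`` key, so oauthlib raises the specific OAuth2
# error instead of the generic MissingTokenError mentioned above.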
| 905
|
Python
|
.py
| 20
| 40.4
| 79
| 0.727273
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,836
|
__init__.py
|
rembo10_headphones/lib/requests_oauthlib/compliance_fixes/__init__.py
|
from __future__ import absolute_import
from .facebook import facebook_compliance_fix
from .fitbit import fitbit_compliance_fix
from .linkedin import linkedin_compliance_fix
from .slack import slack_compliance_fix
from .instagram import instagram_compliance_fix
from .mailchimp import mailchimp_compliance_fix
from .weibo import weibo_compliance_fix
from .plentymarkets import plentymarkets_compliance_fix
| 406
|
Python
|
.py
| 9
| 44
| 55
| 0.858586
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,837
|
linkedin.py
|
rembo10_headphones/lib/requests_oauthlib/compliance_fixes/linkedin.py
|
from json import loads, dumps
from oauthlib.common import add_params_to_uri, to_unicode
def linkedin_compliance_fix(session):
def _missing_token_type(r):
token = loads(r.text)
token["token_type"] = "Bearer"
r._content = to_unicode(dumps(token)).encode("UTF-8")
return r
def _non_compliant_param_name(url, headers, data):
token = [("oauth2_access_token", session.access_token)]
url = add_params_to_uri(url, token)
return url, headers, data
session._client.default_token_placement = "query"
session.register_compliance_hook("access_token_response", _missing_token_type)
session.register_compliance_hook("protected_request", _non_compliant_param_name)
return session
| 749
|
Python
|
.py
| 16
| 40.5
| 84
| 0.695055
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,838
|
mailchimp.py
|
rembo10_headphones/lib/requests_oauthlib/compliance_fixes/mailchimp.py
|
import json
from oauthlib.common import to_unicode
def mailchimp_compliance_fix(session):
def _null_scope(r):
token = json.loads(r.text)
if "scope" in token and token["scope"] is None:
token.pop("scope")
r._content = to_unicode(json.dumps(token)).encode("utf-8")
return r
def _non_zero_expiration(r):
token = json.loads(r.text)
if "expires_in" in token and token["expires_in"] == 0:
token["expires_in"] = 3600
r._content = to_unicode(json.dumps(token)).encode("utf-8")
return r
session.register_compliance_hook("access_token_response", _null_scope)
session.register_compliance_hook("access_token_response", _non_zero_expiration)
return session
| 757
|
Python
|
.py
| 18
| 34.777778
| 83
| 0.651226
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,839
|
douban.py
|
rembo10_headphones/lib/requests_oauthlib/compliance_fixes/douban.py
|
import json
from oauthlib.common import to_unicode
def douban_compliance_fix(session):
def fix_token_type(r):
token = json.loads(r.text)
token.setdefault("token_type", "Bearer")
fixed_token = json.dumps(token)
r._content = to_unicode(fixed_token).encode("utf-8")
return r
session._client_default_token_placement = "query"
session.register_compliance_hook("access_token_response", fix_token_type)
return session
| 472
|
Python
|
.py
| 12
| 33.25
| 77
| 0.696703
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,840
|
user.py
|
rembo10_headphones/lib/pygazelle/user.py
|
class InvalidUserException(Exception):
pass
class User(object):
"""
This class represents a User, whether your own or someone else's. It is created knowing only its ID. To reduce
    API accesses, load information using User.update_index_data() or User.update_user_data() only as needed.
"""
def __init__(self, id, parent_api):
self.id = id
self.parent_api = parent_api
self.username = None
self.authkey = None
self.passkey = None
self.avatar = None
self.is_friend = None
self.profile_text = None
self.notifications = None
self.stats = None
self.ranks = None
self.personal = None
self.community = None
self.parent_api.cached_users[self.id] = self # add self to cache of known User objects
def update_index_data(self):
"""
Calls 'index' API action, then updates this User objects information with it.
NOTE: Only call if this user is the logged-in user...throws InvalidUserException otherwise.
"""
response = self.parent_api.request(action='index')
self.set_index_data(response)
def set_index_data(self, index_json_response):
"""
Takes parsed JSON response from 'index' action on api, and updates the available subset of user information.
ONLY callable if this User object represents the currently logged in user. Throws InvalidUserException otherwise.
"""
if self.id != index_json_response['id']:
raise InvalidUserException("Tried to update non-logged-in User's information from 'index' API call." +
" Should be %s, got %s" % (self.id, index_json_response['id']) )
self.username = index_json_response['username']
self.authkey = index_json_response['authkey']
self.passkey = index_json_response['passkey']
self.notifications = index_json_response['notifications']
if self.stats:
self.stats = dict(list(self.stats.items()) + list(index_json_response['userstats'].items())) # merge in new info
else:
self.stats = index_json_response['userstats']
# cross pollinate some data that is located in multiple locations in API
if self.personal:
self.personal['class'] = self.stats['class']
self.personal['passkey'] = self.passkey
def update_user_data(self):
response = self.parent_api.request(action='user', id=self.id)
self.set_user_data(response)
def set_user_data(self, user_json_response):
"""
Takes parsed JSON response from 'user' action on api, and updates relevant user information.
To avoid problems, only pass in user data from an API call that used this user's ID as an argument.
"""
if self.username and self.username != user_json_response['username']:
raise InvalidUserException("Tried to update a user's information from a 'user' API call with a different username." +
" Should be %s, got %s" % (self.username, user_json_response['username']) )
self.username = user_json_response['username']
self.avatar = user_json_response['avatar']
self.is_friend = user_json_response['isFriend']
self.profile_text = user_json_response['profileText']
if self.stats:
self.stats = dict(list(self.stats.items()) + list(user_json_response['stats'].items())) # merge in new info
else:
self.stats = user_json_response['stats']
self.ranks = user_json_response['ranks']
self.personal = user_json_response['personal']
self.community = user_json_response['community']
# cross pollinate some data that is located in multiple locations in API
self.stats['class'] = self.personal['class']
self.passkey = self.personal['passkey']
def set_search_result_data(self, search_result_item):
"""
Takes a single user result item from a 'usersearch' API call and updates user info.
"""
if self.id != search_result_item['userId']:
raise InvalidUserException("Tried to update existing user with another user's search result data (IDs don't match).")
self.username = search_result_item['username']
if not self.personal:
self.personal = {}
self.personal['donor'] = search_result_item['donor']
self.personal['warned'] = search_result_item['warned']
self.personal['enabled'] = search_result_item['enabled']
self.personal['class'] = search_result_item['class']
def __repr__(self):
return "User: %s - ID: %s" % (self.username, self.id)
| 4,755
|
Python
|
.py
| 89
| 43.786517
| 129
| 0.641274
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,841
|
category.py
|
rembo10_headphones/lib/pygazelle/category.py
|
class InvalidCategoryException(Exception):
pass
class Category(object):
def __init__(self, id, parent_api):
self.id = id
self.parent_api = parent_api
self.name = None
self.parent_api.cached_categories[self.id] = self # add self to cache of known Category objects
def __repr__(self):
return "Category: %s - id: %s" % (self.name, self.id)
| 391
|
Python
|
.py
| 10
| 32.7
| 103
| 0.641161
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,842
|
api.py
|
rembo10_headphones/lib/pygazelle/api.py
|
#!/usr/bin/env python
#
# PyGazelle - https://github.com/cohena/pygazelle
# A Python implementation of the What.cd Gazelle JSON API
#
# Loosely based on the API implementation from 'whatbetter', by Zachary Denton
# See https://github.com/zacharydenton/whatbetter
import html
import sys
import json
import time
import requests as requests
import headphones
from .user import User
from .artist import Artist
from .tag import Tag
from .request import Request
from .torrent_group import TorrentGroup
from .torrent import Torrent
from .category import Category
from .inbox import Mailbox
class LoginException(Exception):
pass
class RequestException(Exception):
pass
class GazelleAPI(object):
last_request = time.time() # share amongst all api objects
default_headers = {
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'User-Agent': 'Headphones/%s' % headphones.CURRENT_VERSION,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9'\
',*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3'}
def __init__(self, username=None, password=None, url=None):
self.session = requests.session()
self.session.headers = self.default_headers
self.username = username
self.password = password
self.authkey = None
self.passkey = None
self.userid = None
self.logged_in_user = None
self.default_timeout = 30
self.cached_users = {}
self.cached_artists = {}
self.cached_tags = {}
self.cached_torrent_groups = {}
self.cached_torrents = {}
self.cached_requests = {}
self.cached_categories = {}
self.site = url + "/"
self.past_request_timestamps = []
def wait_for_rate_limit(self):
# maximum is 5 requests within 10 secs
time_frame = 10
max_reqs = 5
slice_point = 0
while len(self.past_request_timestamps) >= max_reqs:
for i, timestamp in enumerate(self.past_request_timestamps):
if timestamp < time.time() - time_frame:
slice_point = i + 1
else:
break
if slice_point:
self.past_request_timestamps = self.past_request_timestamps[slice_point:]
else:
time.sleep(0.1)
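    # Sliding-window behaviour sketch (illustrative, not from the original
    # source): with max_reqs=5 and time_frame=10, a sixth call inside the
    # 10-second window loops in 0.1s sleeps until the oldest timestamp ages
    # out of the window, then the request proceeds.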
def logged_in(self):
return self.logged_in_user is not None and self.logged_in_user.id == self.userid
def _login(self):
"""
Private method.
Logs in user and gets authkey from server.
"""
if self.logged_in():
return
self.wait_for_rate_limit()
loginpage = self.site + 'login.php'
data = {'username': self.username,
'password': self.password,
'keeplogged': '1'}
r = self.session.post(loginpage, data=data, timeout=self.default_timeout, headers=self.default_headers)
self.past_request_timestamps.append(time.time())
if r.status_code != 200:
raise LoginException("Login returned status code %s" % r.status_code)
try:
accountinfo = self.request('index', autologin=False)
        except RequestException:
raise LoginException("Login probably incorrect")
if not accountinfo or 'id' not in accountinfo:
raise LoginException("Login probably incorrect")
self.userid = accountinfo['id']
self.authkey = accountinfo['authkey']
self.passkey = accountinfo['passkey']
self.logged_in_user = User(self.userid, self)
self.logged_in_user.set_index_data(accountinfo)
def request(self, action, autologin=True, **kwargs):
"""
Makes an AJAX request at a given action.
Pass an action and relevant arguments for that action.
"""
def make_request(action, **kwargs):
ajaxpage = 'ajax.php'
content = self.unparsed_request(ajaxpage, action, **kwargs)
try:
if not isinstance(content, text_type):
content = content.decode('utf-8')
parsed = json.loads(content)
if parsed['status'] != 'success':
raise RequestException
return parsed['response']
except ValueError:
raise RequestException
try:
return make_request(action, **kwargs)
except Exception as e:
if autologin and not self.logged_in():
self._login()
return make_request(action, **kwargs)
else:
                raise
def unparsed_request(self, sitepage, action, **kwargs):
"""
Makes a generic HTTP request at a given page with a given action.
Also pass relevant arguments for that action.
"""
self.wait_for_rate_limit()
url = "%s%s" % (self.site, sitepage)
params = {'action': action}
if self.authkey:
params['auth'] = self.authkey
params.update(kwargs)
r = self.session.get(url, params=params, allow_redirects=False, timeout=self.default_timeout)
if r.status_code == 302 and r.raw.headers['location'] == 'login.php':
self.logged_in_user = None
raise LoginException("User login expired")
self.past_request_timestamps.append(time.time())
return r.content
def get_user(self, id):
"""
Returns a User for the passed ID, associated with this API object. If the ID references the currently logged in
user, the user returned will be pre-populated with the information from an 'index' API call. Otherwise, you'll
need to call User.update_user_data(). This is done on demand to reduce unnecessary API calls.
"""
id = int(id)
if id == self.userid:
return self.logged_in_user
elif id in list(self.cached_users.keys()):
return self.cached_users[id]
else:
return User(id, self)
def search_users(self, search_query):
"""
Returns a list of users returned for the search query. You can search by name, part of name, and ID number. If
one of the returned users is the currently logged-in user, that user object will be pre-populated with the
information from an 'index' API call. Otherwise only the limited info returned by the search will be pre-pop'd.
You can query more information with User.update_user_data(). This is done on demand to reduce unnecessary API calls.
"""
response = self.request(action='usersearch', search=search_query)
results = response['results']
found_users = []
for result in results:
user = self.get_user(result['userId'])
user.set_search_result_data(result)
found_users.append(user)
return found_users
def get_inbox(self, page='1', sort='unread'):
"""
Returns the inbox Mailbox for the logged in user
"""
return Mailbox(self, 'inbox', page, sort)
def get_sentbox(self, page='1', sort='unread'):
"""
Returns the sentbox Mailbox for the logged in user
"""
return Mailbox(self, 'sentbox', page, sort)
def get_artist(self, id=None, name=None):
"""
Returns an Artist for the passed ID, associated with this API object. You'll need to call Artist.update_data()
if the artist hasn't already been cached. This is done on demand to reduce unnecessary API calls.
"""
if id:
id = int(id)
if id in list(self.cached_artists.keys()):
artist = self.cached_artists[id]
else:
artist = Artist(id, self)
if name:
artist.name = html.unescape(name)
elif name:
artist = Artist(-1, self)
artist.name = html.unescape(name)
else:
raise Exception("You must specify either an ID or a Name to get an artist.")
return artist
def get_tag(self, name):
"""
Returns a Tag for the passed name, associated with this API object. If you know the count value for this tag,
pass it to update the object. There is no way to query the count directly from the API, but it can be retrieved
from other calls such as 'artist', however.
"""
if name in list(self.cached_tags.keys()):
return self.cached_tags[name]
else:
return Tag(name, self)
def get_request(self, id):
"""
Returns a Request for the passed ID, associated with this API object. You'll need to call Request.update_data()
if the request hasn't already been cached. This is done on demand to reduce unnecessary API calls.
"""
id = int(id)
if id in list(self.cached_requests.keys()):
return self.cached_requests[id]
else:
return Request(id, self)
def get_torrent_group(self, id):
"""
Returns a TorrentGroup for the passed ID, associated with this API object.
"""
id = int(id)
if id in list(self.cached_torrent_groups.keys()):
return self.cached_torrent_groups[id]
else:
return TorrentGroup(id, self)
def get_torrent(self, id):
"""
Returns a Torrent for the passed ID, associated with this API object.
"""
id = int(id)
if id in list(self.cached_torrents.keys()):
return self.cached_torrents[id]
else:
return Torrent(id, self)
def get_torrent_from_info_hash(self, info_hash):
"""
Returns a Torrent for the passed info hash (if one exists), associated with this API object.
"""
try:
response = self.request(action='torrent', hash=info_hash.upper())
except RequestException:
return None
id = int(response['torrent']['id'])
if id in list(self.cached_torrents.keys()):
torrent = self.cached_torrents[id]
else:
torrent = Torrent(id, self)
torrent.set_torrent_complete_data(response)
return torrent
def get_category(self, id, name=None):
"""
Returns a Category for the passed ID, associated with this API object.
"""
id = int(id)
if id in list(self.cached_categories.keys()):
cat = self.cached_categories[id]
else:
cat = Category(id, self)
if name:
cat.name = name
return cat
def get_top_10(self, type="torrents", limit=25):
"""
Lists the top <limit> items of <type>. Type can be "torrents", "tags", or "users". Limit MUST be
10, 25, or 100...it can't just be an arbitrary number (unfortunately). Results are organized into a list of hashes.
Each hash contains the results for a specific time frame, like 'day', or 'week'. In the hash, the 'results' key
contains a list of objects appropriate to the passed <type>.
"""
response = self.request(action='top10', type=type, limit=limit)
top_items = []
if not response:
raise RequestException
for category in response:
results = []
if type == "torrents":
for item in category['results']:
torrent = self.get_torrent(item['torrentId'])
torrent.set_torrent_top_10_data(item)
results.append(torrent)
elif type == "tags":
for item in category['results']:
tag = self.get_tag(item['name'])
results.append(tag)
elif type == "users":
for item in category['results']:
user = self.get_user(item['id'])
results.append(user)
else:
raise Exception("%s is an invalid type argument for GazelleAPI.get_top_ten()" % type)
top_items.append({
"caption": category['caption'],
"tag": category['tag'],
"limit": category['limit'],
"results": results
})
return top_items
def search_torrents(self, **kwargs):
"""
Searches based on the args you pass and returns torrent groups filled with torrents.
Pass strings unless otherwise specified.
Valid search args:
searchstr (any arbitrary string to search for)
page (page to display -- default: 1)
        artistname (self-explanatory)
        groupname (torrent group name, equivalent to album)
        recordlabel (self-explanatory)
        cataloguenumber (self-explanatory)
        year (self-explanatory)
        remastertitle (self-explanatory)
        remasteryear (self-explanatory)
        remasterrecordlabel (self-explanatory)
        remastercataloguenumber (self-explanatory)
filelist (can search for filenames found in torrent...unsure of formatting for multiple files)
encoding (use constants in pygazelle.Encoding module)
format (use constants in pygazelle.Format module)
media (use constants in pygazelle.Media module)
releasetype (use constants in pygazelle.ReleaseType module)
haslog (int 1 or 0 to represent boolean, 100 for 100% only, -1 for < 100% / unscored)
hascue (int 1 or 0 to represent boolean)
scene (int 1 or 0 to represent boolean)
vanityhouse (int 1 or 0 to represent boolean)
freetorrent (int 1 or 0 to represent boolean)
taglist (comma separated tag names)
tags_type (0 for 'any' matching, 1 for 'all' matching)
order_by (use constants in pygazelle.order module that start with by_ in their name)
order_way (use way_ascending or way_descending constants in pygazelle.order)
filter_cat (for each category you want to search, the param name must be filter_cat[catnum] and the value 1)
ex. filter_cat[1]=1 turns on Music.
filter_cat[1]=1, filter_cat[2]=1 turns on music and applications. (two separate params and vals!)
Category object ids return the correct int value for these. (verify?)
Returns a dict containing keys 'curr_page', 'pages', and 'results'. Results contains a matching list of Torrents
(they have a reference to their parent TorrentGroup).
"""
response = self.request(action='browse', **kwargs)
results = response['results']
if len(results):
curr_page = response['currentPage']
pages = response['pages']
else:
curr_page = 1
pages = 1
matching_torrents = []
for torrent_group_dict in results:
torrent_group = self.get_torrent_group(torrent_group_dict['groupId'])
torrent_group.set_torrent_search_data(torrent_group_dict)
for torrent_dict in torrent_group_dict['torrents']:
torrent_dict['groupId'] = torrent_group.id
torrent = self.get_torrent(torrent_dict['torrentId'])
torrent.set_torrent_search_data(torrent_dict)
matching_torrents.append(torrent)
return {'curr_page': curr_page, 'pages': pages, 'results': matching_torrents}
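    # Hypothetical usage sketch (``api`` is an already logged-in GazelleAPI;
    # the format module is the one defined in this package):
    #
    #     from pygazelle import format as audio_format
    #     page = api.search_torrents(artistname='Some Artist',
    #                                format=audio_format.FLAC)
    #     for torrent in page['results']:
    #         print(torrent)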
def generate_torrent_link(self, id, use_token=False):
url = "%storrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s&usetoken=%d" %\
(self.site, id, self.logged_in_user.authkey, self.logged_in_user.passkey, use_token)
return url
def save_torrent_file(self, id, dest, use_token=False):
file_data = self.unparsed_request("torrents.php", 'download',
id=id, authkey=self.logged_in_user.authkey, torrent_pass=self.logged_in_user.passkey,
usetoken=int(use_token))
        with open(dest, 'wb') as dest_file:  # response content is bytes, so write in binary mode
dest_file.write(file_data)
# Python 2's ``unicode`` is gone; both branches of the old version check
# collapsed to ``str``, so define it directly.
text_type = str
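# A minimal end-to-end sketch (placeholder credentials and URL, not from the
# original source): construct the API, let the first request trigger the
# lazy login, and save a torrent file.
#
#     api = GazelleAPI(username='user', password='pass',
#                      url='https://tracker.example.com')
#     results = api.search_torrents(searchstr='some album')
#     if results['results']:
#         api.save_torrent_file(results['results'][0].id, '/tmp/a.torrent')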
| 16,389
|
Python
|
.py
| 367
| 34.228883
| 124
| 0.606137
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,843
|
format.py
|
rembo10_headphones/lib/pygazelle/format.py
|
MP3 = "MP3"
FLAC = "FLAC"
AAC = "AAC"
AC3 = "AC3"
DTS = "DTS"
OGG_VORBIS = "Ogg Vorbis"
ALL_FORMATS = [MP3, FLAC, AAC, AC3, DTS, OGG_VORBIS]
| 141
|
Python
|
.py
| 7
| 19.142857
| 52
| 0.634328
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,844
|
request.py
|
rembo10_headphones/lib/pygazelle/request.py
|
class InvalidRequestException(Exception):
pass
class Request(object):
def __init__(self, id, parent_api):
self.id = id
self.parent_api = parent_api
self.category = None
self.title = None
self.year = None
self.time_added = None
self.votes = None
self.bounty = None
self.parent_api.cached_requests[self.id] = self # add self to cache of known Request objects
def set_data(self, request_item_json_data):
if self.id != request_item_json_data['requestId']:
raise InvalidRequestException("Tried to update a Request's information from a request JSON item with a different id." +
" Should be %s, got %s" % (self.id, request_item_json_data['requestId']) )
self.category = self.parent_api.get_category(request_item_json_data['categoryId'])
self.title = request_item_json_data['title']
self.year = request_item_json_data['year']
self.time_added = request_item_json_data['timeAdded']
self.votes = request_item_json_data['votes']
self.bounty = request_item_json_data['bounty']
def __repr__(self):
return "Request: %s - ID: %s" % (self.title, self.id)
| 1,246
|
Python
|
.py
| 25
| 40.52
| 131
| 0.625616
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,845
|
encoding.py
|
rembo10_headphones/lib/pygazelle/encoding.py
|
C192 = "192"
APS = "APS (VBR)"
V2 = "V2 (VBR)"
V1 = "V1 (VBR)"
C256 = "256"
APX = "APX (VBR)"
V0 = "V0 (VBR)"
C320 = "320"
LOSSLESS = "Lossless"
LOSSLESS_24 = "24bit Lossless"
V8 = "V8 (VBR)"
ALL_ENCODINGS = [C192, APS, V2, V1, C256, APX, V0, C320, LOSSLESS, LOSSLESS_24, V8]
| 276
|
Python
|
.py
| 12
| 22
| 83
| 0.613636
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,846
|
release_type.py
|
rembo10_headphones/lib/pygazelle/release_type.py
|
ALBUM = "Album"
SOUNDTRACK = "Soundtrack"
EP = "EP"
ANTHOLOGY = "Anthology"
COMPILATION = "Compilation"
DJ_MIX = "DJ Mix"
SINGLE = "Single"
LIVE_ALBUM = "Live album"
REMIX = "Remix"
BOOTLEG = "Bootleg"
INTERVIEW = "Interview"
MIXTAPE = "Mixtape"
UNKNOWN = "Unknown"
ALL_RELEASE_TYPES = [ALBUM, SOUNDTRACK, EP, ANTHOLOGY, COMPILATION, DJ_MIX, SINGLE, LIVE_ALBUM, REMIX, BOOTLEG,
INTERVIEW, MIXTAPE, UNKNOWN]
def get_int_val(release_type):
return ALL_RELEASE_TYPES.index(release_type) + 1
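# Illustrative values: get_int_val(ALBUM) == 1 and get_int_val(SOUNDTRACK) == 2;
# the list above is ordered so indices map onto Gazelle's 1-based releasetype codes.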
| 513
|
Python
|
.py
| 17
| 27.647059
| 111
| 0.711111
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,847
|
artist.py
|
rembo10_headphones/lib/pygazelle/artist.py
|
import html
class InvalidArtistException(Exception):
pass
class Artist(object):
"""
This class represents an Artist. It is created knowing only its ID. To reduce API accesses, load information using
Artist.update_data() only as needed.
"""
def __init__(self, id, parent_api):
self.id = id
self.parent_api = parent_api
self.name = None
self.notifications_enabled = None
self.has_bookmarked = None
self.image = None
self.body = None
self.vanity_house = None
self.tags = []
self.similar_artists_and_score = {}
self.statistics = None
self.torrent_groups = []
self.requests = []
self.parent_api.cached_artists[self.id] = self # add self to cache of known Artist objects
def update_data(self):
if self.id > 0:
response = self.parent_api.request(action='artist', id=self.id)
elif self.name:
self.name = html.unescape(self.name)
try:
response = self.parent_api.request(action='artist', artistname=self.name)
except Exception:
self.name = self.name.split(" & ")[0]
response = self.parent_api.request(action='artist', artistname=self.name)
else:
raise InvalidArtistException("Neither ID or Artist Name is valid, can't update data.")
self.set_data(response)
def set_data(self, artist_json_response):
if self.id > 0 and self.id != artist_json_response['id']:
            raise InvalidArtistException("Tried to update an artist's information from an 'artist' API call with a different id." +
" Should be %s, got %s" % (self.id, artist_json_response['id']) )
elif self.name:
self.id = artist_json_response['id']
self.parent_api.cached_artists[self.id] = self
self.name = html.unescape(artist_json_response['name'])
self.notifications_enabled = artist_json_response['notificationsEnabled']
self.has_bookmarked = artist_json_response['hasBookmarked']
self.image = artist_json_response['image']
self.body = artist_json_response['body']
self.vanity_house = artist_json_response['vanityHouse']
self.tags = []
for tag_dict in artist_json_response['tags']:
tag = self.parent_api.get_tag(tag_dict['name'])
tag.set_artist_count(self, tag_dict['count'])
self.tags.append(tag)
self.similar_artists_and_score = {}
for similar_artist_dict in artist_json_response['similarArtists']:
similar_artist = self.parent_api.get_artist(similar_artist_dict['artistId'])
similar_artist.name = similar_artist_dict['name']
self.similar_artists_and_score[similar_artist] = similar_artist_dict['score']
self.statistics = artist_json_response['statistics']
self.torrent_groups = []
for torrent_group_item in artist_json_response['torrentgroup']:
torrent_group = self.parent_api.get_torrent_group(torrent_group_item['groupId'])
torrent_group.set_artist_group_data(torrent_group_item)
self.torrent_groups.append(torrent_group)
self.requests = []
for request_json_item in artist_json_response['requests']:
request = self.parent_api.get_request(request_json_item['requestId'])
request.set_data(request_json_item)
self.requests.append(request)
def __repr__(self):
return "Artist: %s - ID: %s" % (self.name, self.id)
| 3,612
|
Python
|
.py
| 72
| 40.055556
| 132
| 0.62943
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,848
|
torrent_group.py
|
rembo10_headphones/lib/pygazelle/torrent_group.py
|
from .torrent import Torrent
class InvalidTorrentGroupException(Exception):
pass
class TorrentGroup(object):
"""
Represents a Torrent Group (usually an album). Note that TorrentGroup.torrents may not be comprehensive if you
haven't called TorrentGroup.update_group_data()...it may have only been populated with filtered search results.
Check TorrentGroup.has_complete_torrent_list (boolean) to be sure.
"""
def __init__(self, id, parent_api):
self.id = id
self.parent_api = parent_api
self.name = None
self.wiki_body = None
self.wiki_image = None
self.year = None
self.record_label = None
self.catalogue_number = None
self.tags = []
self.release_type = None
self.vanity_house = None
self.has_bookmarked = None
self.category = None
self.time = None
self.music_info = None
self.torrents = []
self.has_complete_torrent_list = False
self.parent_api.cached_torrent_groups[self.id] = self
def update_group_data(self):
response = self.parent_api.request(action='torrentgroup', id=self.id)
self.set_group_data(response)
def set_group_data(self, torrent_group_json_response):
"""
Takes parsed JSON response from 'torrentgroup' action on api, and updates relevant information.
To avoid problems, only pass in data from an API call that used this torrentgroup's ID as an argument.
"""
if self.id != torrent_group_json_response['group']['id']:
            raise InvalidTorrentGroupException("Tried to update a TorrentGroup's information from a 'torrentgroup' API call with a different id." +
                                               " Should be %s, got %s" % (self.id, torrent_group_json_response['group']['id']))
self.name = torrent_group_json_response['group']['name']
self.year = torrent_group_json_response['group']['year']
self.wiki_body = torrent_group_json_response['group']['wikiBody']
self.wiki_image = torrent_group_json_response['group']['wikiImage']
self.record_label = torrent_group_json_response['group']['recordLabel']
self.catalogue_number = torrent_group_json_response['group']['catalogueNumber']
self.release_type = torrent_group_json_response['group']['releaseType']
self.category = self.parent_api.get_category(torrent_group_json_response['group']['categoryId'],
torrent_group_json_response['group']['categoryName'])
self.time = torrent_group_json_response['group']['time']
self.vanity_house = torrent_group_json_response['group']['vanityHouse']
self.music_info = torrent_group_json_response['group']['musicInfo']
self.music_info['artists'] = [ self.parent_api.get_artist(artist['id'], artist['name'])
for artist in self.music_info['artists'] ]
self.music_info['with'] = [ self.parent_api.get_artist(artist['id'], artist['name'])
for artist in self.music_info['with'] ]
if 'torrents' in torrent_group_json_response:
self.torrents = []
for torrent_dict in torrent_group_json_response['torrents']:
torrent_dict['groupId'] = self.id
torrent = self.parent_api.get_torrent(torrent_dict['id'])
torrent.set_torrent_group_data(torrent_dict)
self.torrents.append(torrent)
self.has_complete_torrent_list = True
elif 'torrent' in torrent_group_json_response:
torrent = self.parent_api.get_torrent(torrent_group_json_response['torrent']['id'])
self.torrents.append(torrent)
def set_artist_group_data(self, artist_group_json_response):
"""
Takes torrentgroup section from parsed JSON response from 'artist' action on api, and updates relevant information.
"""
if self.id != artist_group_json_response['groupId']:
raise InvalidTorrentGroupException("Tried to update a TorrentGroup's information from an 'artist' API call with a different id." +
" Should be %s, got %s" % (self.id, artist_group_json_response['groupId']) )
self.name = artist_group_json_response['groupName']
self.year = artist_group_json_response['groupYear']
self.record_label = artist_group_json_response['groupRecordLabel']
self.catalogue_number = artist_group_json_response['groupCatalogueNumber']
self.tags = []
for tag_name in artist_group_json_response['tags']:
tag = self.parent_api.get_tag(tag_name)
self.tags.append(tag)
self.release_type = artist_group_json_response['releaseType']
self.has_bookmarked = artist_group_json_response['hasBookmarked']
self.torrents = []
for torrent_dict in artist_group_json_response['torrent']:
torrent = self.parent_api.get_torrent(torrent_dict['id'])
torrent.set_torrent_artist_data(torrent_dict)
self.torrents.append(torrent)
self.has_complete_torrent_list = True
def set_torrent_search_data(self, search_json_response):
if self.id != search_json_response['groupId']:
raise InvalidTorrentGroupException("Tried to update a TorrentGroup's information from an 'browse'/search API call with a different id." +
" Should be %s, got %s" % (self.id, search_json_response['groupId']) )
self.name = search_json_response['groupName']
        # purposefully ignoring search_json_response['artist']; the other data updates don't include it, and it would just get confusing
self.tags = []
for tag_name in search_json_response['tags']:
tag = self.parent_api.get_tag(tag_name)
self.tags.append(tag)
        # some of the below keys aren't present in categories like comics; should probably watch out for this elsewhere
        if 'bookmarked' in search_json_response:
            self.has_bookmarked = search_json_response['bookmarked']
        if 'vanityHouse' in search_json_response:
            self.vanity_house = search_json_response['vanityHouse']
        if 'groupYear' in search_json_response:
            self.year = search_json_response['groupYear']
        if 'releaseType' in search_json_response:
            self.release_type = search_json_response['releaseType']
        self.time = search_json_response['groupTime']
        if 'torrentId' in search_json_response:
search_json_response['torrents'] = [{'torrentId': search_json_response['torrentId']}]
new_torrents = []
for torrent_dict in search_json_response['torrents']:
torrent_dict['groupId'] = self.id
torrent = self.parent_api.get_torrent(torrent_dict['torrentId'])
new_torrents.append(torrent)
# torrent information gets populated in API search call, no need to duplicate that here
self.torrents = self.torrents + new_torrents
def __repr__(self):
return "TorrentGroup: %s - ID: %s" % (self.name, self.id)
| 7,282 | Python | .py | 120 | 49.425 | 149 | 0.640767 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,849 | tag.py | rembo10_headphones/lib/pygazelle/tag.py |
class Tag(object):
def __init__(self, name, parent_api):
self.name = name
self.artist_counts = {}
self.parent_api = parent_api
self.parent_api.cached_tags[self.name] = self # add self to cache of known Tag objects
def set_artist_count(self, artist, count):
"""
Adds an artist to the known list of artists tagged with this tag (if necessary), and sets the count of times
that that artist has been known to be tagged with this tag.
"""
self.artist_counts[artist] = count
def __repr__(self):
return "Tag: %s" % self.name
| 612 | Python | .py | 14 | 36 | 116 | 0.627517 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,850 | torrent.py | rembo10_headphones/lib/pygazelle/torrent.py |
from html import unescape
import re
class InvalidTorrentException(Exception):
pass
class Torrent(object):
def __init__(self, id, parent_api):
self.id = id
self.parent_api = parent_api
self.group = None
self.media = None
self.format = None
self.encoding = None
self.remaster_year = None
self.remastered = None
self.remaster_title = None
self.remaster_record_label = None
self.remaster_catalogue_number = None
self.scene = None
self.has_log = None
self.has_cue = None
self.log_score = None
self.file_count = None
self.free_torrent = None
self.size = None
self.leechers = None
self.seeders = None
self.snatched = None
self.time = None
self.has_file = None
self.description = None
self.file_list = []
self.file_path = None
self.user = None
self.parent_api.cached_torrents[self.id] = self
def set_torrent_complete_data(self, torrent_json_response):
        if self.id != torrent_json_response['torrent']['id']:
            raise InvalidTorrentException("Tried to update a Torrent's information from a 'torrent' API call with a different id." +
                                          " Should be %s, got %s" % (self.id, torrent_json_response['torrent']['id']) )
self.group = self.parent_api.get_torrent_group(torrent_json_response['group']['id'])
had_complete_list = self.group.has_complete_torrent_list
self.group.set_group_data(torrent_json_response)
self.group.has_complete_torrent_list = had_complete_list
self.media = torrent_json_response['torrent']['media']
self.format = torrent_json_response['torrent']['format']
self.encoding = torrent_json_response['torrent']['encoding']
self.remaster_year = torrent_json_response['torrent']['remasterYear']
self.remastered = torrent_json_response['torrent']['remastered']
self.remaster_title = torrent_json_response['torrent']['remasterTitle']
self.remaster_record_label = torrent_json_response['torrent']['remasterRecordLabel']
self.scene = torrent_json_response['torrent']['scene']
self.has_log = torrent_json_response['torrent']['hasLog']
self.has_cue = torrent_json_response['torrent']['hasCue']
self.log_score = torrent_json_response['torrent']['logScore']
self.file_count = torrent_json_response['torrent']['fileCount']
self.free_torrent = torrent_json_response['torrent']['freeTorrent']
self.size = torrent_json_response['torrent']['size']
self.leechers = torrent_json_response['torrent']['leechers']
self.seeders = torrent_json_response['torrent']['seeders']
self.snatched = torrent_json_response['torrent']['snatched']
self.time = torrent_json_response['torrent']['time']
self.description = torrent_json_response['torrent']['description']
self.file_list = [ re.match(r"(.+){{{(\d+)}}}", item).groups()
for item in torrent_json_response['torrent']['fileList'].split("|||") ] # tuple ( filename, filesize )
self.file_path = torrent_json_response['torrent']['filePath']
self.user = self.parent_api.get_user(torrent_json_response['torrent']['userId'])
def set_torrent_artist_data(self, artist_torrent_json_response):
if self.id != artist_torrent_json_response['id']:
raise InvalidTorrentException("Tried to update a Torrent's information from an 'artist' API call with a different id." +
" Should be %s, got %s" % (self.id, artist_torrent_json_response['id']) )
self.group = self.parent_api.get_torrent_group(artist_torrent_json_response['groupId'])
self.media = artist_torrent_json_response['media']
self.format = artist_torrent_json_response['format']
self.encoding = artist_torrent_json_response['encoding']
self.remaster_year = artist_torrent_json_response['remasterYear']
self.remastered = artist_torrent_json_response['remastered']
self.remaster_title = artist_torrent_json_response['remasterTitle']
self.remaster_record_label = artist_torrent_json_response['remasterRecordLabel']
self.scene = artist_torrent_json_response['scene']
self.has_log = artist_torrent_json_response['hasLog']
self.has_cue = artist_torrent_json_response['hasCue']
self.log_score = artist_torrent_json_response['logScore']
self.file_count = artist_torrent_json_response['fileCount']
self.free_torrent = artist_torrent_json_response['freeTorrent']
self.size = artist_torrent_json_response['size']
self.leechers = artist_torrent_json_response['leechers']
self.seeders = artist_torrent_json_response['seeders']
self.snatched = artist_torrent_json_response['snatched']
self.time = artist_torrent_json_response['time']
self.has_file = artist_torrent_json_response['hasFile']
def set_torrent_group_data(self, group_torrent_json_response):
if self.id != group_torrent_json_response['id']:
raise InvalidTorrentException("Tried to update a Torrent's information from a 'torrentgroup' API call with a different id." +
" Should be %s, got %s" % (self.id, group_torrent_json_response['id']) )
self.group = self.parent_api.get_torrent_group(group_torrent_json_response['groupId'])
self.media = group_torrent_json_response['media']
self.format = group_torrent_json_response['format']
self.encoding = group_torrent_json_response['encoding']
self.remastered = group_torrent_json_response['remastered']
self.remaster_year = group_torrent_json_response['remasterYear']
self.remaster_title = group_torrent_json_response['remasterTitle']
self.remaster_record_label = group_torrent_json_response['remasterRecordLabel']
self.remaster_catalogue_number = group_torrent_json_response['remasterCatalogueNumber']
self.scene = group_torrent_json_response['scene']
self.has_log = group_torrent_json_response['hasLog']
self.has_cue = group_torrent_json_response['hasCue']
self.log_score = group_torrent_json_response['logScore']
self.file_count = group_torrent_json_response['fileCount']
self.size = group_torrent_json_response['size']
self.seeders = group_torrent_json_response['seeders']
self.leechers = group_torrent_json_response['leechers']
self.snatched = group_torrent_json_response['snatched']
self.free_torrent = group_torrent_json_response['freeTorrent']
self.time = group_torrent_json_response['time']
self.description = group_torrent_json_response['description']
self.file_list = [ re.match(r"(.+){{{(\d+)}}}", item).groups()
for item in group_torrent_json_response['fileList'].split("|||") ] # tuple ( filename, filesize )
self.file_path = group_torrent_json_response['filePath']
self.user = self.parent_api.get_user(group_torrent_json_response['userId'])
def set_torrent_search_data(self, search_torrent_json_response):
if self.id != search_torrent_json_response['torrentId']:
raise InvalidTorrentException("Tried to update a Torrent's information from a 'browse'/search API call with a different id." +
" Should be %s, got %s" % (self.id, search_torrent_json_response['torrentId']) )
# TODO: Add conditionals to handle torrents that aren't music
self.group = self.parent_api.get_torrent_group(search_torrent_json_response['groupId'])
self.remastered = search_torrent_json_response['remastered']
self.remaster_year = search_torrent_json_response['remasterYear']
self.remaster_title = search_torrent_json_response['remasterTitle']
self.remaster_catalogue_number = search_torrent_json_response['remasterCatalogueNumber']
self.media = search_torrent_json_response['media']
self.format = search_torrent_json_response['format']
self.encoding = search_torrent_json_response['encoding']
self.has_log = search_torrent_json_response['hasLog']
self.has_cue = search_torrent_json_response['hasCue']
self.log_score = search_torrent_json_response['logScore']
self.scene = search_torrent_json_response['scene']
self.file_count = search_torrent_json_response['fileCount']
self.size = search_torrent_json_response['size']
self.seeders = search_torrent_json_response['seeders']
self.leechers = search_torrent_json_response['leechers']
self.snatched = search_torrent_json_response['snatches']
self.free_torrent = search_torrent_json_response['isFreeleech'] or search_torrent_json_response['isPersonalFreeleech']
self.time = search_torrent_json_response['time']
self.can_use_token = search_torrent_json_response.get('canUseToken', False)
def set_torrent_top_10_data(self, top_10_json_response):
        if self.id != top_10_json_response['torrentId']:
            raise InvalidTorrentException("Tried to update a Torrent's information from a 'top10' API call with a different id." +
                                          " Should be %s, got %s" % (self.id, top_10_json_response['torrentId']) )
# TODO: Add conditionals to handle torrents that aren't music
self.group = self.parent_api.get_torrent_group(top_10_json_response['groupId'])
self.group.name = top_10_json_response['groupName']
if not self.group.music_info and top_10_json_response['artist']:
            self.group.music_info = {'artists': [self.parent_api.get_artist(name=unescape(top_10_json_response['artist']))]}
self.remaster_title = top_10_json_response['remasterTitle']
self.media = top_10_json_response['media']
self.format = top_10_json_response['format']
self.encoding = top_10_json_response['encoding']
self.has_log = top_10_json_response['hasLog']
self.has_cue = top_10_json_response['hasCue']
self.scene = top_10_json_response['scene']
self.seeders = top_10_json_response['seeders']
self.leechers = top_10_json_response['leechers']
self.snatched = top_10_json_response['snatched']
def __repr__(self):
if self.group:
groupname = self.group.name
else:
groupname = "Unknown Group"
return "Torrent: %s - %s - ID: %s" % (groupname, self.encoding, self.id)
| 10,728 | Python | .py | 168 | 53.797619 | 138 | 0.661324 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,851 | media.py | rembo10_headphones/lib/pygazelle/media.py |
CD = "CD"
DVD = "DVD"
VINYL = "Vinyl"
SOUNDBOARD = "Soundboard"
SACD = "SACD"
DAT = "DAT"
CASETTE = "Casette"
WEB = "WEB"
BLU_RAY = "Blu-ray"
ALL_MEDIAS = [CD, DVD, VINYL, SOUNDBOARD, SACD, DAT, CASETTE, WEB, BLU_RAY]
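# Illustrative sketch (not part of the original module): these constants are
# meant to be compared against Torrent.media, e.g. to keep only vinyl rips.
def _example_only_vinyl(torrents):
    return [t for t in torrents if t.media == VINYL]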
| 218 | Python | .py | 10 | 20.8 | 75 | 0.663462 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,852 | inbox.py | rembo10_headphones/lib/pygazelle/inbox.py |
class MailboxMessage(object):
def __init__(self, api, message):
self.id = message['convId']
self.conv = Conversation(api, self.id)
self.subject = message['subject']
self.unread = message['unread']
self.sticky = message['sticky']
self.fwd_id = message['forwardedId']
self.fwd_name = message['forwardedName']
self.sender_id = message['senderId']
self.username = message['username']
self.donor = message['donor']
self.warned = message['warned']
self.enabled = message['enabled']
self.date = message['date']
def __repr__(self):
return "MailboxMessage ID %s - %s %s %s" % (self.id, self.subject, self.sender_id, self.username)
class ConversationMessage(object):
def __init__(self, msg_resp):
self.id = msg_resp['messageId']
self.sender_id = msg_resp['senderId']
self.sender_name = msg_resp['senderName']
self.sent_date = msg_resp['sentDate']
self.bb_body = msg_resp['bbBody']
self.body = msg_resp['body']
def __repr__(self):
return "ConversationMessage ID %s - %s %s" % (self.id, self.sender_name, self.sent_date)
class Conversation(object):
def __init__(self, api, conv_id):
self.id = conv_id
self.parent_api = api
self.subject = None
self.sticky = None
self.messages = []
def __repr__(self):
return "Conversation ID %s - %s" % (self.id, self.subject)
def set_conv_data(self, conv_resp):
assert self.id == conv_resp['convId']
self.subject = conv_resp['subject']
self.sticky = conv_resp['sticky']
self.messages = [ConversationMessage(m) for m in conv_resp['messages']]
def update_conv_data(self):
response = self.parent_api.request(action='inbox',
type='viewconv', id=self.id)
self.set_conv_data(response)
class Mailbox(object):
"""
This class represents the logged in user's inbox/sentbox
"""
def __init__(self, parent_api, boxtype='inbox', page='1', sort='unread'):
self.parent_api = parent_api
self.boxtype = boxtype
self.current_page = page
self.total_pages = None
self.sort = sort
self.messages = None
def set_mbox_data(self, mbox_resp):
"""
Takes parsed JSON response from 'inbox' action on api
and updates the available subset of mailbox information.
"""
self.current_page = mbox_resp['currentPage']
self.total_pages = mbox_resp['pages']
self.messages = \
[MailboxMessage(self.parent_api, m) for m in mbox_resp['messages']]
def update_mbox_data(self):
response = self.parent_api.request(action='inbox',
type=self.boxtype, page=self.current_page, sort=self.sort)
self.set_mbox_data(response)
def next_page(self):
if not self.total_pages:
raise ValueError("call update_mbox_data() first")
total_pages = int(self.total_pages)
cur_page = int(self.current_page)
if cur_page < total_pages:
return Mailbox(self.parent_api, self.boxtype,
str(cur_page + 1), self.sort)
raise ValueError("Already at page %d/%d" % (cur_page, total_pages))
def prev_page(self):
if not self.total_pages:
raise ValueError("call update_mbox_data() first")
total_pages = int(self.total_pages)
cur_page = int(self.current_page)
if cur_page > 1:
return Mailbox(self.parent_api, self.boxtype,
str(cur_page - 1), self.sort)
raise ValueError("Already at page %d/%d" % (cur_page, total_pages))
def __repr__(self):
return "Mailbox: %s %s Page %s/%s" \
% (self.boxtype, self.sort,
self.current_page, self.total_pages)
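# Illustrative sketch (not part of the original module): walking every inbox
# page. next_page() returns a fresh, unfetched Mailbox, so each page needs its
# own update_mbox_data() call; `api` is assumed to be a connected instance.
def _example_iter_inbox(api):
    box = Mailbox(api)
    box.update_mbox_data()
    while True:
        yield from box.messages
        try:
            box = box.next_page()
        except ValueError:  # raised once we are already on the last page
            return
        box.update_mbox_data()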
| 3,948 | Python | .py | 91 | 33.934066 | 105 | 0.594376 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,853 | models.py | rembo10_headphones/lib/charset_normalizer/models.py |
import warnings
from collections import Counter
from encodings.aliases import aliases
from hashlib import sha256
from json import dumps
from re import sub
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from .constant import NOT_PRINTABLE_PATTERN, TOO_BIG_SEQUENCE
from .md import mess_ratio
from .utils import iana_name, is_multi_byte_encoding, unicode_range
class CharsetMatch:
def __init__(
self,
payload: bytes,
guessed_encoding: str,
mean_mess_ratio: float,
has_sig_or_bom: bool,
languages: "CoherenceMatches",
decoded_payload: Optional[str] = None,
):
self._payload = payload # type: bytes
self._encoding = guessed_encoding # type: str
self._mean_mess_ratio = mean_mess_ratio # type: float
self._languages = languages # type: CoherenceMatches
self._has_sig_or_bom = has_sig_or_bom # type: bool
self._unicode_ranges = None # type: Optional[List[str]]
self._leaves = [] # type: List[CharsetMatch]
self._mean_coherence_ratio = 0.0 # type: float
self._output_payload = None # type: Optional[bytes]
self._output_encoding = None # type: Optional[str]
self._string = decoded_payload # type: Optional[str]
def __eq__(self, other: object) -> bool:
if not isinstance(other, CharsetMatch):
raise TypeError(
"__eq__ cannot be invoked on {} and {}.".format(
str(other.__class__), str(self.__class__)
)
)
return self.encoding == other.encoding and self.fingerprint == other.fingerprint
def __lt__(self, other: object) -> bool:
"""
Implemented to make sorted available upon CharsetMatches items.
"""
if not isinstance(other, CharsetMatch):
raise ValueError
chaos_difference = abs(self.chaos - other.chaos) # type: float
coherence_difference = abs(self.coherence - other.coherence) # type: float
        # Below 1% difference --> Use Coherence
if chaos_difference < 0.01 and coherence_difference > 0.02:
# When having a tough decision, use the result that decoded as many multi-byte as possible.
if chaos_difference == 0.0 and self.coherence == other.coherence:
return self.multi_byte_usage > other.multi_byte_usage
return self.coherence > other.coherence
return self.chaos < other.chaos
@property
def multi_byte_usage(self) -> float:
return 1.0 - len(str(self)) / len(self.raw)
@property
def chaos_secondary_pass(self) -> float:
"""
Check once again chaos in decoded text, except this time, with full content.
Use with caution, this can be very slow.
Notice: Will be removed in 3.0
"""
warnings.warn(
"chaos_secondary_pass is deprecated and will be removed in 3.0",
DeprecationWarning,
)
return mess_ratio(str(self), 1.0)
@property
def coherence_non_latin(self) -> float:
"""
Coherence ratio on the first non-latin language detected if ANY.
Notice: Will be removed in 3.0
"""
warnings.warn(
"coherence_non_latin is deprecated and will be removed in 3.0",
DeprecationWarning,
)
return 0.0
@property
def w_counter(self) -> Counter:
"""
Word counter instance on decoded text.
Notice: Will be removed in 3.0
"""
warnings.warn(
"w_counter is deprecated and will be removed in 3.0", DeprecationWarning
)
string_printable_only = sub(NOT_PRINTABLE_PATTERN, " ", str(self).lower())
return Counter(string_printable_only.split())
def __str__(self) -> str:
# Lazy Str Loading
if self._string is None:
self._string = str(self._payload, self._encoding, "strict")
return self._string
def __repr__(self) -> str:
return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
def add_submatch(self, other: "CharsetMatch") -> None:
if not isinstance(other, CharsetMatch) or other == self:
raise ValueError(
"Unable to add instance <{}> as a submatch of a CharsetMatch".format(
other.__class__
)
)
other._string = None # Unload RAM usage; dirty trick.
self._leaves.append(other)
@property
def encoding(self) -> str:
return self._encoding
@property
def encoding_aliases(self) -> List[str]:
"""
        An encoding is known by many names; using this can help when searching for IBM855 when it is listed as CP855.
"""
also_known_as = [] # type: List[str]
for u, p in aliases.items():
if self.encoding == u:
also_known_as.append(p)
elif self.encoding == p:
also_known_as.append(u)
return also_known_as
@property
def bom(self) -> bool:
return self._has_sig_or_bom
@property
def byte_order_mark(self) -> bool:
return self._has_sig_or_bom
@property
def languages(self) -> List[str]:
"""
Return the complete list of possible languages found in decoded sequence.
Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'.
"""
return [e[0] for e in self._languages]
@property
def language(self) -> str:
"""
Most probable language found in decoded sequence. If none were detected or inferred, the property will return
"Unknown".
"""
if not self._languages:
# Trying to infer the language based on the given encoding
# Its either English or we should not pronounce ourselves in certain cases.
if "ascii" in self.could_be_from_charset:
return "English"
# doing it there to avoid circular import
from charset_normalizer.cd import encoding_languages, mb_encoding_languages
languages = (
mb_encoding_languages(self.encoding)
if is_multi_byte_encoding(self.encoding)
else encoding_languages(self.encoding)
)
if len(languages) == 0 or "Latin Based" in languages:
return "Unknown"
return languages[0]
return self._languages[0][0]
@property
def chaos(self) -> float:
return self._mean_mess_ratio
@property
def coherence(self) -> float:
if not self._languages:
return 0.0
return self._languages[0][1]
@property
def percent_chaos(self) -> float:
return round(self.chaos * 100, ndigits=3)
@property
def percent_coherence(self) -> float:
return round(self.coherence * 100, ndigits=3)
@property
def raw(self) -> bytes:
"""
Original untouched bytes.
"""
return self._payload
@property
def submatch(self) -> List["CharsetMatch"]:
return self._leaves
@property
def has_submatch(self) -> bool:
return len(self._leaves) > 0
@property
def alphabets(self) -> List[str]:
if self._unicode_ranges is not None:
return self._unicode_ranges
# list detected ranges
detected_ranges = [
unicode_range(char) for char in str(self)
] # type: List[Optional[str]]
# filter and sort
self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
return self._unicode_ranges
@property
def could_be_from_charset(self) -> List[str]:
"""
        The complete list of encodings that produce the exact SAME str result, and therefore could be the originating
        encoding.
        This list includes the encoding given by the 'encoding' property.
"""
return [self._encoding] + [m.encoding for m in self._leaves]
def first(self) -> "CharsetMatch":
"""
Kept for BC reasons. Will be removed in 3.0.
"""
return self
def best(self) -> "CharsetMatch":
"""
Kept for BC reasons. Will be removed in 3.0.
"""
return self
def output(self, encoding: str = "utf_8") -> bytes:
"""
        Method to get the re-encoded bytes payload using the given target encoding. Defaults to UTF-8.
        Any encoding errors are substituted with a replacement marker ('replace' error handler), not raised.
"""
if self._output_encoding is None or self._output_encoding != encoding:
self._output_encoding = encoding
self._output_payload = str(self).encode(encoding, "replace")
return self._output_payload # type: ignore
@property
def fingerprint(self) -> str:
"""
Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
"""
return sha256(self.output()).hexdigest()
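# Illustrative sketch (not part of the original module): CharsetMatch is plain
# enough to build by hand. output() re-encodes the decoded text (with 'replace'
# on errors) and fingerprint hashes that re-encoded payload, not the raw bytes.
def _example_charset_match():
    match = CharsetMatch(b"caf\xc3\xa9", "utf_8", 0.0, False, [], None)
    assert str(match) == "café"
    return match.output("latin_1"), match.fingerprint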
class CharsetMatches:
"""
    Container holding every CharsetMatch item, ordered by default from the most probable to the least.
    Acts like a list (iterable) but does not implement all related methods.
"""
    def __init__(self, results: Optional[List[CharsetMatch]] = None):
self._results = sorted(results) if results else [] # type: List[CharsetMatch]
def __iter__(self) -> Iterator[CharsetMatch]:
yield from self._results
def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
"""
Retrieve a single item either by its position or encoding name (alias may be used here).
Raise KeyError upon invalid index or encoding not present in results.
"""
if isinstance(item, int):
return self._results[item]
if isinstance(item, str):
item = iana_name(item, False)
for result in self._results:
if item in result.could_be_from_charset:
return result
raise KeyError
def __len__(self) -> int:
return len(self._results)
def __bool__(self) -> bool:
return len(self._results) > 0
def append(self, item: CharsetMatch) -> None:
"""
        Insert a single match. It is inserted so as to preserve the sort order,
        and may instead be attached as a submatch of an equivalent existing match.
"""
if not isinstance(item, CharsetMatch):
raise ValueError(
"Cannot append instance '{}' to CharsetMatches".format(
str(item.__class__)
)
)
# We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
if len(item.raw) <= TOO_BIG_SEQUENCE:
for match in self._results:
if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
match.add_submatch(item)
return
self._results.append(item)
self._results = sorted(self._results)
def best(self) -> Optional["CharsetMatch"]:
"""
Simply return the first match. Strict equivalent to matches[0].
"""
if not self._results:
return None
return self._results[0]
def first(self) -> Optional["CharsetMatch"]:
"""
Redundant method, call the method best(). Kept for BC reasons.
"""
return self.best()
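# Illustrative sketch (not part of the original module): the container keeps
# itself sorted on append() and supports lookup by encoding name or alias.
def _example_charset_matches():
    m = CharsetMatch(b"hello", "ascii", 0.0, False, [], None)
    matches = CharsetMatches()
    matches.append(m)
    return matches.best() is m and matches["ascii"] is m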
CoherenceMatch = Tuple[str, float]
CoherenceMatches = List[CoherenceMatch]
class CliDetectionResult:
def __init__(
self,
path: str,
encoding: Optional[str],
encoding_aliases: List[str],
alternative_encodings: List[str],
language: str,
alphabets: List[str],
has_sig_or_bom: bool,
chaos: float,
coherence: float,
unicode_path: Optional[str],
is_preferred: bool,
):
self.path = path # type: str
self.unicode_path = unicode_path # type: Optional[str]
self.encoding = encoding # type: Optional[str]
self.encoding_aliases = encoding_aliases # type: List[str]
self.alternative_encodings = alternative_encodings # type: List[str]
self.language = language # type: str
self.alphabets = alphabets # type: List[str]
self.has_sig_or_bom = has_sig_or_bom # type: bool
self.chaos = chaos # type: float
self.coherence = coherence # type: float
self.is_preferred = is_preferred # type: bool
@property
def __dict__(self) -> Dict[str, Any]: # type: ignore
return {
"path": self.path,
"encoding": self.encoding,
"encoding_aliases": self.encoding_aliases,
"alternative_encodings": self.alternative_encodings,
"language": self.language,
"alphabets": self.alphabets,
"has_sig_or_bom": self.has_sig_or_bom,
"chaos": self.chaos,
"coherence": self.coherence,
"unicode_path": self.unicode_path,
"is_preferred": self.is_preferred,
}
def to_json(self) -> str:
return dumps(self.__dict__, ensure_ascii=True, indent=4)
| 13,303 | Python | .py | 329 | 31.231003 | 120 | 0.604911 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,854 | cd.py | rembo10_headphones/lib/charset_normalizer/cd.py |
import importlib
from codecs import IncrementalDecoder
from collections import Counter, OrderedDict
from functools import lru_cache
from typing import Dict, List, Optional, Tuple
from .assets import FREQUENCIES
from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
is_accentuated,
is_latin,
is_multi_byte_encoding,
is_unicode_range_secondary,
unicode_range,
)
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Return associated unicode ranges in a single byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module("encodings.{}".format(iana_name)).IncrementalDecoder # type: ignore
p = decoder(errors="ignore") # type: IncrementalDecoder
seen_ranges = {} # type: Dict[str, int]
character_count = 0 # type: int
for i in range(0x40, 0xFF):
chunk = p.decode(bytes([i])) # type: str
if chunk:
character_range = unicode_range(chunk) # type: Optional[str]
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
if character_range not in seen_ranges:
seen_ranges[character_range] = 0
seen_ranges[character_range] += 1
character_count += 1
return sorted(
[
character_range
for character_range in seen_ranges
if seen_ranges[character_range] / character_count >= 0.15
]
)
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages = [] # type: List[str]
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
    Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
unicode_ranges = encoding_unicode_range(iana_name) # type: List[str]
primary_range = None # type: Optional[str]
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
@lru_cache()
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
    Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name == "cp932"
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
return ["Chinese", "Classical Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
return ["Korean"]
return []
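# Illustrative sketch (not part of the original module): the multi-byte mapping
# above is purely name-based, so these lookups follow directly from the code.
def _example_mb_languages():
    assert mb_encoding_languages("cp932") == ["Japanese"]
    assert mb_encoding_languages("gb18030") == ["Chinese", "Classical Chinese"]
    assert mb_encoding_languages("utf_8") == []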
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
"""
    Determine the main aspects of a supported language: whether it contains accents and whether it is pure Latin.
"""
target_have_accents = False # type: bool
target_pure_latin = True # type: bool
for character in FREQUENCIES[language]:
if not target_have_accents and is_accentuated(character):
target_have_accents = True
if target_pure_latin and is_latin(character) is False:
target_pure_latin = False
return target_have_accents, target_pure_latin
def alphabet_languages(
characters: List[str], ignore_non_latin: bool = False
) -> List[str]:
"""
    Return the languages associated with the given characters.
"""
languages = [] # type: List[Tuple[str, float]]
source_have_accents = any(is_accentuated(character) for character in characters)
for language, language_characters in FREQUENCIES.items():
target_have_accents, target_pure_latin = get_target_features(language)
if ignore_non_latin and target_pure_latin is False:
continue
if target_have_accents is False and source_have_accents:
continue
character_count = len(language_characters) # type: int
character_match_count = len(
[c for c in language_characters if c in characters]
) # type: int
ratio = character_match_count / character_count # type: float
if ratio >= 0.2:
languages.append((language, ratio))
languages = sorted(languages, key=lambda x: x[1], reverse=True)
return [compatible_language[0] for compatible_language in languages]
def characters_popularity_compare(
language: str, ordered_characters: List[str]
) -> float:
"""
    Determine if an ordered list of characters (from most frequent to rarest) matches a particular language.
    The result is a ratio between 0. (absolutely no correspondence) and 1. (near-perfect fit).
    Beware that this function is not strict on the match, in order to ease detection (a close match counts as 1.).
"""
if language not in FREQUENCIES:
raise ValueError("{} not available".format(language))
character_approved_count = 0 # type: int
for character in ordered_characters:
if character not in FREQUENCIES[language]:
continue
characters_before_source = FREQUENCIES[language][
0 : FREQUENCIES[language].index(character)
] # type: List[str]
characters_after_source = FREQUENCIES[language][
FREQUENCIES[language].index(character) :
] # type: List[str]
characters_before = ordered_characters[
0 : ordered_characters.index(character)
] # type: List[str]
characters_after = ordered_characters[
ordered_characters.index(character) :
] # type: List[str]
before_match_count = [
e in characters_before for e in characters_before_source
].count(
True
) # type: int
after_match_count = [
e in characters_after for e in characters_after_source
].count(
True
) # type: int
if len(characters_before_source) == 0 and before_match_count <= 4:
character_approved_count += 1
continue
if len(characters_after_source) == 0 and after_match_count <= 4:
character_approved_count += 1
continue
if (
before_match_count / len(characters_before_source) >= 0.4
or after_match_count / len(characters_after_source) >= 0.4
):
character_approved_count += 1
continue
return character_approved_count / len(ordered_characters)
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
"""
Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
    E.g. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list;
    one containing the Latin letters and the other the Hebrew ones.
"""
layers = OrderedDict() # type: Dict[str, str]
for character in decoded_sequence:
if character.isalpha() is False:
continue
character_range = unicode_range(character) # type: Optional[str]
if character_range is None:
continue
layer_target_range = None # type: Optional[str]
for discovered_range in layers:
if (
is_suspiciously_successive_range(discovered_range, character_range)
is False
):
layer_target_range = discovered_range
break
if layer_target_range is None:
layer_target_range = character_range
if layer_target_range not in layers:
layers[layer_target_range] = character.lower()
continue
layers[layer_target_range] += character.lower()
return list(layers.values())
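# Illustrative sketch (not part of the original module): mixed Latin/Hebrew text
# comes back as one lower-cased layer per alphabet, as the docstring describes.
def _example_alpha_unicode_split():
    return alpha_unicode_split("Hello שלום")
    # -> something like ['hello', 'שלום'] (one Basic Latin layer, one Hebrew layer)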
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
"""
    This function merges results previously produced by coherence_ratio.
    The return type is the same as coherence_ratio's.
"""
per_language_ratios = OrderedDict() # type: Dict[str, List[float]]
for result in results:
for sub_result in result:
language, ratio = sub_result
if language not in per_language_ratios:
per_language_ratios[language] = [ratio]
continue
per_language_ratios[language].append(ratio)
merge = [
(
language,
round(
sum(per_language_ratios[language]) / len(per_language_ratios[language]),
4,
),
)
for language in per_language_ratios
]
return sorted(merge, key=lambda x: x[1], reverse=True)
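# Illustrative sketch (not part of the original module): per-layer ratios for the
# same language are averaged, then everything is sorted best-first.
def _example_merge():
    return merge_coherence_ratios([[("English", 0.8)], [("English", 0.6), ("French", 0.5)]])
    # -> [('English', 0.7), ('French', 0.5)]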
@lru_cache(maxsize=2048)
def coherence_ratio(
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
) -> CoherenceMatches:
"""
    Detect ANY language that can be identified in the given sequence. The sequence is analysed by layers.
A layer = Character extraction by alphabets/ranges.
"""
results = [] # type: List[Tuple[str, float]]
ignore_non_latin = False # type: bool
sufficient_match_count = 0 # type: int
lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
if "Latin Based" in lg_inclusion_list:
ignore_non_latin = True
lg_inclusion_list.remove("Latin Based")
for layer in alpha_unicode_split(decoded_sequence):
sequence_frequencies = Counter(layer) # type: Counter
most_common = sequence_frequencies.most_common()
character_count = sum(o for c, o in most_common) # type: int
if character_count <= TOO_SMALL_SEQUENCE:
continue
popular_character_ordered = [c for c, o in most_common] # type: List[str]
for language in lg_inclusion_list or alphabet_languages(
popular_character_ordered, ignore_non_latin
):
ratio = characters_popularity_compare(
language, popular_character_ordered
) # type: float
if ratio < threshold:
continue
elif ratio >= 0.8:
sufficient_match_count += 1
results.append((language, round(ratio, 4)))
if sufficient_match_count >= 3:
break
return sorted(results, key=lambda x: x[1], reverse=True)
| 11,076 | Python | .py | 261 | 34.038314 | 118 | 0.645864 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,855 | md.py | rembo10_headphones/lib/charset_normalizer/md.py |
from functools import lru_cache
from typing import List, Optional
from .constant import COMMON_SAFE_ASCII_CHARACTERS, UNICODE_SECONDARY_RANGE_KEYWORD
from .utils import (
is_accentuated,
is_ascii,
is_case_variable,
is_cjk,
is_emoticon,
is_hangul,
is_hiragana,
is_katakana,
is_latin,
is_punctuation,
is_separator,
is_symbol,
is_thai,
remove_accent,
unicode_range,
)
class MessDetectorPlugin:
"""
Base abstract class used for mess detection plugins.
All detectors MUST extend and implement given methods.
"""
def eligible(self, character: str) -> bool:
"""
Determine if given character should be fed in.
"""
raise NotImplementedError # pragma: nocover
def feed(self, character: str) -> None:
"""
The main routine to be executed upon character.
        Insert the logic under which the text would be considered chaotic.
"""
raise NotImplementedError # pragma: nocover
def reset(self) -> None: # pragma: no cover
"""
Permit to reset the plugin to the initial state.
"""
raise NotImplementedError
@property
def ratio(self) -> float:
"""
Compute the chaos ratio based on what your feed() has seen.
        Must NOT be lower than 0.; there is no upper bound.
"""
raise NotImplementedError # pragma: nocover
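# Illustrative sketch (not part of the original module): the smallest possible
# plugin honouring the contract above. Note that mess_ratio() discovers plugins
# via MessDetectorPlugin.__subclasses__(), so the subclass is defined lazily here
# to avoid silently enrolling this example in the real detection run.
def _example_custom_plugin():
    class NulBytePlugin(MessDetectorPlugin):
        def __init__(self) -> None:
            self._nul_count = 0
            self._character_count = 0
        def eligible(self, character: str) -> bool:
            return True
        def feed(self, character: str) -> None:
            self._character_count += 1
            if character == "\x00":
                self._nul_count += 1
        def reset(self) -> None:
            self._nul_count = 0
            self._character_count = 0
        @property
        def ratio(self) -> float:
            if self._character_count == 0:
                return 0.0
            return self._nul_count / self._character_count
    return NulBytePlugin()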
class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._punctuation_count = 0 # type: int
self._symbol_count = 0 # type: int
self._character_count = 0 # type: int
self._last_printable_char = None # type: Optional[str]
self._frenzy_symbol_in_word = False # type: bool
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if (
character != self._last_printable_char
and character not in COMMON_SAFE_ASCII_CHARACTERS
):
if is_punctuation(character):
self._punctuation_count += 1
elif (
character.isdigit() is False
and is_symbol(character)
and is_emoticon(character) is False
):
self._symbol_count += 2
self._last_printable_char = character
def reset(self) -> None: # pragma: no cover
self._punctuation_count = 0
self._character_count = 0
self._symbol_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
ratio_of_punctuation = (
self._punctuation_count + self._symbol_count
) / self._character_count # type: float
return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
class TooManyAccentuatedPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._character_count = 0 # type: int
self._accentuated_count = 0 # type: int
def eligible(self, character: str) -> bool:
return character.isalpha()
def feed(self, character: str) -> None:
self._character_count += 1
if is_accentuated(character):
self._accentuated_count += 1
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._accentuated_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
ratio_of_accentuation = (
self._accentuated_count / self._character_count
) # type: float
return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
class UnprintablePlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._unprintable_count = 0 # type: int
self._character_count = 0 # type: int
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if (
character.isspace() is False # includes \n \t \r \v
and character.isprintable() is False
and character != "\x1A" # Why? Its the ASCII substitute character.
):
self._unprintable_count += 1
self._character_count += 1
def reset(self) -> None: # pragma: no cover
self._unprintable_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return (self._unprintable_count * 8) / self._character_count
class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._successive_count = 0 # type: int
self._character_count = 0 # type: int
self._last_latin_character = None # type: Optional[str]
def eligible(self, character: str) -> bool:
return character.isalpha() and is_latin(character)
def feed(self, character: str) -> None:
self._character_count += 1
if (
self._last_latin_character is not None
and is_accentuated(character)
and is_accentuated(self._last_latin_character)
):
if character.isupper() and self._last_latin_character.isupper():
self._successive_count += 1
            # Worse if it's the same char duplicated with a different accent.
if remove_accent(character) == remove_accent(self._last_latin_character):
self._successive_count += 1
self._last_latin_character = character
def reset(self) -> None: # pragma: no cover
self._successive_count = 0
self._character_count = 0
self._last_latin_character = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return (self._successive_count * 2) / self._character_count
class SuspiciousRange(MessDetectorPlugin):
def __init__(self) -> None:
self._suspicious_successive_range_count = 0 # type: int
self._character_count = 0 # type: int
self._last_printable_seen = None # type: Optional[str]
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if (
character.isspace()
or is_punctuation(character)
or character in COMMON_SAFE_ASCII_CHARACTERS
):
self._last_printable_seen = None
return
if self._last_printable_seen is None:
self._last_printable_seen = character
return
unicode_range_a = unicode_range(
self._last_printable_seen
) # type: Optional[str]
unicode_range_b = unicode_range(character) # type: Optional[str]
if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
self._suspicious_successive_range_count += 1
self._last_printable_seen = character
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._suspicious_successive_range_count = 0
self._last_printable_seen = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
ratio_of_suspicious_range_usage = (
self._suspicious_successive_range_count * 2
) / self._character_count # type: float
if ratio_of_suspicious_range_usage < 0.1:
return 0.0
return ratio_of_suspicious_range_usage
class SuperWeirdWordPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._word_count = 0 # type: int
self._bad_word_count = 0 # type: int
self._foreign_long_count = 0 # type: int
self._is_current_word_bad = False # type: bool
self._foreign_long_watch = False # type: bool
self._character_count = 0 # type: int
self._bad_character_count = 0 # type: int
self._buffer = "" # type: str
self._buffer_accent_count = 0 # type: int
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character.isalpha():
self._buffer = "".join([self._buffer, character])
if is_accentuated(character):
self._buffer_accent_count += 1
if (
self._foreign_long_watch is False
and (is_latin(character) is False or is_accentuated(character))
and is_cjk(character) is False
and is_hangul(character) is False
and is_katakana(character) is False
and is_hiragana(character) is False
and is_thai(character) is False
):
self._foreign_long_watch = True
return
if not self._buffer:
return
if (
character.isspace() or is_punctuation(character) or is_separator(character)
) and self._buffer:
self._word_count += 1
buffer_length = len(self._buffer) # type: int
self._character_count += buffer_length
if buffer_length >= 4:
if self._buffer_accent_count / buffer_length > 0.34:
self._is_current_word_bad = True
                # Words/buffers ending with an upper-case accentuated letter are so rare
                # that we consider them all suspicious. Same weight as a foreign_long suspicion.
if is_accentuated(self._buffer[-1]) and self._buffer[-1].isupper():
self._foreign_long_count += 1
self._is_current_word_bad = True
if buffer_length >= 24 and self._foreign_long_watch:
self._foreign_long_count += 1
self._is_current_word_bad = True
if self._is_current_word_bad:
self._bad_word_count += 1
self._bad_character_count += len(self._buffer)
self._is_current_word_bad = False
self._foreign_long_watch = False
self._buffer = ""
self._buffer_accent_count = 0
elif (
character not in {"<", ">", "-", "="}
and character.isdigit() is False
and is_symbol(character)
):
self._is_current_word_bad = True
self._buffer += character
def reset(self) -> None: # pragma: no cover
self._buffer = ""
self._is_current_word_bad = False
self._foreign_long_watch = False
self._bad_word_count = 0
self._word_count = 0
self._character_count = 0
self._bad_character_count = 0
self._foreign_long_count = 0
@property
def ratio(self) -> float:
if self._word_count <= 10 and self._foreign_long_count == 0:
return 0.0
return self._bad_character_count / self._character_count
class CjkInvalidStopPlugin(MessDetectorPlugin):
"""
    GB (Chinese) based encodings often render the full stop incorrectly when the content does not fit,
    which can be easily detected by searching for an overuse of '丅' and '丄'.
"""
def __init__(self) -> None:
self._wrong_stop_count = 0 # type: int
self._cjk_character_count = 0 # type: int
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character in {"丅", "丄"}:
self._wrong_stop_count += 1
return
if is_cjk(character):
self._cjk_character_count += 1
def reset(self) -> None: # pragma: no cover
self._wrong_stop_count = 0
self._cjk_character_count = 0
@property
def ratio(self) -> float:
if self._cjk_character_count < 16:
return 0.0
return self._wrong_stop_count / self._cjk_character_count
class ArchaicUpperLowerPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._buf = False # type: bool
self._character_count_since_last_sep = 0 # type: int
self._successive_upper_lower_count = 0 # type: int
self._successive_upper_lower_count_final = 0 # type: int
self._character_count = 0 # type: int
self._last_alpha_seen = None # type: Optional[str]
self._current_ascii_only = True # type: bool
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
is_concerned = character.isalpha() and is_case_variable(character)
chunk_sep = is_concerned is False
if chunk_sep and self._character_count_since_last_sep > 0:
if (
self._character_count_since_last_sep <= 64
and character.isdigit() is False
and self._current_ascii_only is False
):
self._successive_upper_lower_count_final += (
self._successive_upper_lower_count
)
self._successive_upper_lower_count = 0
self._character_count_since_last_sep = 0
self._last_alpha_seen = None
self._buf = False
self._character_count += 1
self._current_ascii_only = True
return
if self._current_ascii_only is True and is_ascii(character) is False:
self._current_ascii_only = False
if self._last_alpha_seen is not None:
if (character.isupper() and self._last_alpha_seen.islower()) or (
character.islower() and self._last_alpha_seen.isupper()
):
if self._buf is True:
self._successive_upper_lower_count += 2
self._buf = False
else:
self._buf = True
else:
self._buf = False
self._character_count += 1
self._character_count_since_last_sep += 1
self._last_alpha_seen = character
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._character_count_since_last_sep = 0
self._successive_upper_lower_count = 0
self._successive_upper_lower_count_final = 0
self._last_alpha_seen = None
self._buf = False
self._current_ascii_only = True
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return self._successive_upper_lower_count_final / self._character_count
def is_suspiciously_successive_range(
unicode_range_a: Optional[str], unicode_range_b: Optional[str]
) -> bool:
"""
    Determine if two Unicode ranges seen next to each other can be considered suspicious.
"""
if unicode_range_a is None or unicode_range_b is None:
return True
if unicode_range_a == unicode_range_b:
return False
if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
return False
if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
return False
# Latin characters can be accompanied with a combining diacritical mark
# eg. Vietnamese.
if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
"Combining" in unicode_range_a or "Combining" in unicode_range_b
):
return False
keywords_range_a, keywords_range_b = unicode_range_a.split(
" "
), unicode_range_b.split(" ")
for el in keywords_range_a:
if el in UNICODE_SECONDARY_RANGE_KEYWORD:
continue
if el in keywords_range_b:
return False
# Japanese Exception
range_a_jp_chars, range_b_jp_chars = (
unicode_range_a
in (
"Hiragana",
"Katakana",
),
unicode_range_b in ("Hiragana", "Katakana"),
)
if (range_a_jp_chars or range_b_jp_chars) and (
"CJK" in unicode_range_a or "CJK" in unicode_range_b
):
return False
if range_a_jp_chars and range_b_jp_chars:
return False
if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
return False
if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
return False
# Chinese/Japanese use dedicated range for punctuation and/or separators.
if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
unicode_range_a in ["Katakana", "Hiragana"]
and unicode_range_b in ["Katakana", "Hiragana"]
):
if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
return False
if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
return False
return True
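# Illustrative sketch (not part of the original module): a few pairings whose
# outcome follows directly from the rules above.
def _example_successive_ranges():
    assert is_suspiciously_successive_range(None, "Basic Latin") is True
    assert is_suspiciously_successive_range("Hiragana", "Katakana") is False  # Japanese exception
    assert is_suspiciously_successive_range("Hangul Syllables", "Basic Latin") is False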
@lru_cache(maxsize=2048)
def mess_ratio(
decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
) -> float:
"""
    Compute a mess ratio given a decoded bytes sequence. The maximum_threshold stops the computation early once reached.
"""
detectors = [
md_class() for md_class in MessDetectorPlugin.__subclasses__()
] # type: List[MessDetectorPlugin]
length = len(decoded_sequence) + 1 # type: int
mean_mess_ratio = 0.0 # type: float
if length < 512:
intermediary_mean_mess_ratio_calc = 32 # type: int
elif length <= 1024:
intermediary_mean_mess_ratio_calc = 64
else:
intermediary_mean_mess_ratio_calc = 128
for character, index in zip(decoded_sequence + "\n", range(length)):
for detector in detectors:
if detector.eligible(character):
detector.feed(character)
if (
index > 0 and index % intermediary_mean_mess_ratio_calc == 0
) or index == length - 1:
mean_mess_ratio = sum(dt.ratio for dt in detectors)
if mean_mess_ratio >= maximum_threshold:
break
if debug:
for dt in detectors: # pragma: nocover
print(dt.__class__, dt.ratio)
return round(mean_mess_ratio, 3)
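# Illustrative sketch (not part of the original module): clean text should land
# near 0.0 while visibly garbled text scores higher; exact values depend on the
# plugin set, so only the ordering is suggested here.
def _example_mess_ratio():
    clean = mess_ratio("This is a perfectly ordinary sentence.")
    noisy = mess_ratio("ÃÃÃ¤ÂÂ£ÃÂ¶ÃÂ§###!!!")
    return clean < noisy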
| 18,176 | Python | .py | 444 | 31.425676 | 113 | 0.597308 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,856 | api.py | rembo10_headphones/lib/charset_normalizer/api.py |
import logging
from os.path import basename, splitext
from typing import BinaryIO, List, Optional, Set
try:
from os import PathLike
except ImportError: # pragma: no cover
PathLike = str # type: ignore
from .cd import (
coherence_ratio,
encoding_languages,
mb_encoding_languages,
merge_coherence_ratios,
)
from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE
from .md import mess_ratio
from .models import CharsetMatch, CharsetMatches
from .utils import (
any_specified_encoding,
iana_name,
identify_sig_or_bom,
is_cp_similar,
is_multi_byte_encoding,
should_strip_sig_or_bom,
)
logger = logging.getLogger("charset_normalizer")
explain_handler = logging.StreamHandler()
explain_handler.setFormatter(
logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
)
def from_bytes(
sequences: bytes,
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.2,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
) -> CharsetMatches:
"""
    Given a raw bytes sequence, return the best possible charsets usable to render str objects.
    If there are no results, it is a strong indicator that the source is binary/not text.
    By default, the process extracts 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence,
    and gives up on a particular code page after 20% of measured mess. Those criteria are customizable at will.
    The preemptive behaviour DOES NOT replace the traditional detection workflow; it prioritizes a particular code page
    but never takes it for granted. It can improve performance.
    You may want to focus your attention on some code pages and/or exclude others; use cp_isolation and cp_exclusion
    for that purpose.
    This function strips the SIG from the payload/sequence every time except for UTF-16 and UTF-32.
    By default the library does not set up any handler other than the NullHandler; if you set the 'explain'
    toggle to True, it will alter the logger configuration to add a StreamHandler suitable for debugging.
    A custom logging format and handler can be set manually.
"""
if not isinstance(sequences, (bytearray, bytes)):
raise TypeError(
"Expected object of type bytes or bytearray, got: {0}".format(
type(sequences)
)
)
if explain:
previous_logger_level = logger.level # type: int
logger.addHandler(explain_handler)
logger.setLevel(logging.DEBUG)
length = len(sequences) # type: int
if length == 0:
logger.warning("Encoding detection on empty bytes, assuming utf_8 intention.")
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level or logging.WARNING)
return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
if cp_isolation is not None:
logger.debug(
"cp_isolation is set. use this flag for debugging purpose. "
"limited list of encoding allowed : %s.",
", ".join(cp_isolation),
)
cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
else:
cp_isolation = []
if cp_exclusion is not None:
logger.debug(
"cp_exclusion is set. use this flag for debugging purpose. "
"limited list of encoding excluded : %s.",
", ".join(cp_exclusion),
)
cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
else:
cp_exclusion = []
if length <= (chunk_size * steps):
logger.debug(
"override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
steps,
chunk_size,
length,
)
steps = 1
chunk_size = length
if steps > 1 and length / steps < chunk_size:
chunk_size = int(length / steps)
is_too_small_sequence = len(sequences) < TOO_SMALL_SEQUENCE # type: bool
is_too_large_sequence = len(sequences) >= TOO_BIG_SEQUENCE # type: bool
if is_too_small_sequence:
logger.warning(
"Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
length
)
)
elif is_too_large_sequence:
logger.info(
"Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
length
)
)
prioritized_encodings = [] # type: List[str]
specified_encoding = (
any_specified_encoding(sequences) if preemptive_behaviour else None
) # type: Optional[str]
if specified_encoding is not None:
prioritized_encodings.append(specified_encoding)
logger.info(
"Detected declarative mark in sequence. Priority +1 given for %s.",
specified_encoding,
)
tested = set() # type: Set[str]
tested_but_hard_failure = [] # type: List[str]
tested_but_soft_failure = [] # type: List[str]
fallback_ascii = None # type: Optional[CharsetMatch]
fallback_u8 = None # type: Optional[CharsetMatch]
fallback_specified = None # type: Optional[CharsetMatch]
results = CharsetMatches() # type: CharsetMatches
sig_encoding, sig_payload = identify_sig_or_bom(sequences)
if sig_encoding is not None:
prioritized_encodings.append(sig_encoding)
logger.info(
"Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
len(sig_payload),
sig_encoding,
)
prioritized_encodings.append("ascii")
if "utf_8" not in prioritized_encodings:
prioritized_encodings.append("utf_8")
for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
if cp_isolation and encoding_iana not in cp_isolation:
continue
if cp_exclusion and encoding_iana in cp_exclusion:
continue
if encoding_iana in tested:
continue
tested.add(encoding_iana)
decoded_payload = None # type: Optional[str]
bom_or_sig_available = sig_encoding == encoding_iana # type: bool
strip_sig_or_bom = bom_or_sig_available and should_strip_sig_or_bom(
encoding_iana
) # type: bool
if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
logger.debug(
"Encoding %s wont be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
encoding_iana,
)
continue
try:
is_multi_byte_decoder = is_multi_byte_encoding(encoding_iana) # type: bool
except (ModuleNotFoundError, ImportError):
logger.debug(
"Encoding %s does not provide an IncrementalDecoder", encoding_iana
)
continue
try:
if is_too_large_sequence and is_multi_byte_decoder is False:
str(
sequences[: int(50e4)]
if strip_sig_or_bom is False
else sequences[len(sig_payload) : int(50e4)],
encoding=encoding_iana,
)
else:
decoded_payload = str(
sequences
if strip_sig_or_bom is False
else sequences[len(sig_payload) :],
encoding=encoding_iana,
)
except (UnicodeDecodeError, LookupError) as e:
if not isinstance(e, LookupError):
logger.debug(
"Code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
tested_but_hard_failure.append(encoding_iana)
continue
similar_soft_failure_test = False # type: bool
for encoding_soft_failed in tested_but_soft_failure:
if is_cp_similar(encoding_iana, encoding_soft_failed):
similar_soft_failure_test = True
break
if similar_soft_failure_test:
logger.debug(
"%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
encoding_iana,
encoding_soft_failed,
)
continue
r_ = range(
0 if not bom_or_sig_available else len(sig_payload),
length,
int(length / steps),
)
multi_byte_bonus = (
is_multi_byte_decoder
and decoded_payload is not None
and len(decoded_payload) < length
) # type: bool
if multi_byte_bonus:
logger.debug(
"Code page %s is a multi byte encoding table and it appear that at least one character "
"was encoded using n-bytes.",
encoding_iana,
)
max_chunk_gave_up = int(len(r_) / 4) # type: int
max_chunk_gave_up = max(max_chunk_gave_up, 2)
early_stop_count = 0 # type: int
lazy_str_hard_failure = False
md_chunks = [] # type: List[str]
md_ratios = []
for i in r_:
if i + chunk_size > length + 8:
continue
cut_sequence = sequences[i : i + chunk_size]
if bom_or_sig_available and strip_sig_or_bom is False:
cut_sequence = sig_payload + cut_sequence
try:
chunk = cut_sequence.decode(
encoding_iana,
errors="ignore" if is_multi_byte_decoder else "strict",
) # type: str
except UnicodeDecodeError as e: # Lazy str loading may have missed something there
logger.debug(
"LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
early_stop_count = max_chunk_gave_up
lazy_str_hard_failure = True
break
# multi-byte bad cutting detector and adjustment
# not the cleanest way to perform that fix but clever enough for now.
if is_multi_byte_decoder and i > 0 and sequences[i] >= 0x80:
chunk_partial_size_chk = min(chunk_size, 16) # type: int
if (
decoded_payload
and chunk[:chunk_partial_size_chk] not in decoded_payload
):
for j in range(i, i - 4, -1):
cut_sequence = sequences[j : i + chunk_size]
if bom_or_sig_available and strip_sig_or_bom is False:
cut_sequence = sig_payload + cut_sequence
chunk = cut_sequence.decode(encoding_iana, errors="ignore")
if chunk[:chunk_partial_size_chk] in decoded_payload:
break
md_chunks.append(chunk)
md_ratios.append(mess_ratio(chunk, threshold))
if md_ratios[-1] >= threshold:
early_stop_count += 1
if (early_stop_count >= max_chunk_gave_up) or (
bom_or_sig_available and strip_sig_or_bom is False
):
break
# We might want to check the sequence again with the whole content
        # Only if the initial MD tests pass
if (
not lazy_str_hard_failure
and is_too_large_sequence
and not is_multi_byte_decoder
):
try:
sequences[int(50e3) :].decode(encoding_iana, errors="strict")
except UnicodeDecodeError as e:
logger.debug(
"LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
tested_but_hard_failure.append(encoding_iana)
continue
mean_mess_ratio = (
sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
) # type: float
if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
tested_but_soft_failure.append(encoding_iana)
logger.info(
"%s was excluded because of initial chaos probing. Gave up %i time(s). "
"Computed mean chaos is %f %%.",
encoding_iana,
early_stop_count,
round(mean_mess_ratio * 100, ndigits=3),
)
# Preparing those fallbacks in case we got nothing.
if (
encoding_iana in ["ascii", "utf_8", specified_encoding]
and not lazy_str_hard_failure
):
fallback_entry = CharsetMatch(
sequences, encoding_iana, threshold, False, [], decoded_payload
)
if encoding_iana == specified_encoding:
fallback_specified = fallback_entry
elif encoding_iana == "ascii":
fallback_ascii = fallback_entry
else:
fallback_u8 = fallback_entry
continue
logger.info(
"%s passed initial chaos probing. Mean measured chaos is %f %%",
encoding_iana,
round(mean_mess_ratio * 100, ndigits=3),
)
if not is_multi_byte_decoder:
target_languages = encoding_languages(encoding_iana) # type: List[str]
else:
target_languages = mb_encoding_languages(encoding_iana)
if target_languages:
logger.debug(
"{} should target any language(s) of {}".format(
encoding_iana, str(target_languages)
)
)
cd_ratios = []
        # We shall skip the CD (coherence detection) when it's about ASCII.
        # Most of the time it's not relevant to run "language-detection" on it.
if encoding_iana != "ascii":
for chunk in md_chunks:
chunk_languages = coherence_ratio(
chunk, 0.1, ",".join(target_languages) if target_languages else None
)
cd_ratios.append(chunk_languages)
cd_ratios_merged = merge_coherence_ratios(cd_ratios)
if cd_ratios_merged:
logger.info(
"We detected language {} using {}".format(
cd_ratios_merged, encoding_iana
)
)
results.append(
CharsetMatch(
sequences,
encoding_iana,
mean_mess_ratio,
bom_or_sig_available,
cd_ratios_merged,
decoded_payload,
)
)
if (
encoding_iana in [specified_encoding, "ascii", "utf_8"]
and mean_mess_ratio < 0.1
):
logger.info(
"%s is most likely the one. Stopping the process.", encoding_iana
)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return CharsetMatches([results[encoding_iana]])
if encoding_iana == sig_encoding:
logger.info(
"%s is most likely the one as we detected a BOM or SIG within the beginning of the sequence.",
encoding_iana,
)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return CharsetMatches([results[encoding_iana]])
if len(results) == 0:
if fallback_u8 or fallback_ascii or fallback_specified:
logger.debug(
"Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback."
)
if fallback_specified:
logger.debug(
"%s will be used as a fallback match", fallback_specified.encoding
)
results.append(fallback_specified)
elif (
(fallback_u8 and fallback_ascii is None)
or (
fallback_u8
and fallback_ascii
and fallback_u8.fingerprint != fallback_ascii.fingerprint
)
or (fallback_u8 is not None)
):
logger.warning("utf_8 will be used as a fallback match")
results.append(fallback_u8)
elif fallback_ascii:
logger.warning("ascii will be used as a fallback match")
results.append(fallback_ascii)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return results
def from_fp(
fp: BinaryIO,
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: List[str] = None,
cp_exclusion: List[str] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
) -> CharsetMatches:
"""
    Same thing as the function from_bytes, but using a file pointer that is already open.
    Will not close the file pointer.
"""
return from_bytes(
fp.read(),
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
explain,
)
def from_path(
path: PathLike,
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: List[str] = None,
cp_exclusion: List[str] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
) -> CharsetMatches:
"""
    Same thing as the function from_bytes, but with one extra step: opening and reading the given file path in binary mode.
    Can raise IOError.
"""
with open(path, "rb") as fp:
return from_fp(
fp,
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
explain,
)
def normalize(
path: PathLike,
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: List[str] = None,
cp_exclusion: List[str] = None,
preemptive_behaviour: bool = True,
) -> CharsetMatch:
"""
Take a (text-based) file path and try to create another file next to it, this time using UTF-8.
"""
results = from_path(
path,
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
)
filename = basename(path)
target_extensions = list(splitext(filename))
if len(results) == 0:
raise IOError(
'Unable to normalize "{}", no encoding charset seems to fit.'.format(
filename
)
)
result = results.best()
target_extensions[0] += "-" + result.encoding # type: ignore
with open(
"{}".format(str(path).replace(filename, "".join(target_extensions))), "wb"
) as fp:
fp.write(result.output()) # type: ignore
return result # type: ignore
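# A minimal usage sketch for this module (not part of the original file);
# the byte payload below is illustrative only.
if __name__ == "__main__":
    candidates = from_bytes("Bonjour à tous".encode("cp1252"))
    best_guess = candidates.best()
    if best_guess is not None:
        print(best_guess.encoding, str(best_guess))  # likely an 8-bit Latin code page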
| 19,377
|
Python
|
.py
| 486
| 28.8107
| 120
| 0.575152
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,857
|
legacy.py
|
rembo10_headphones/lib/charset_normalizer/legacy.py
|
import warnings
from typing import Dict, Optional, Union
from .api import from_bytes, from_fp, from_path, normalize
from .constant import CHARDET_CORRESPONDENCE
from .models import CharsetMatch, CharsetMatches
def detect(byte_str: bytes) -> Dict[str, Optional[Union[str, float]]]:
"""
chardet legacy method
Detect the encoding of the given byte string. It should be mostly backward-compatible.
    Encoding names will match Chardet's own spelling whenever possible (not for encoding names it does not support).
    This function is deprecated and exists to ease migrating your project; consult the documentation for
    further information. Not planned for removal.
:param byte_str: The byte sequence to examine.
"""
if not isinstance(byte_str, (bytearray, bytes)):
raise TypeError( # pragma: nocover
"Expected object of type bytes or bytearray, got: "
"{0}".format(type(byte_str))
)
if isinstance(byte_str, bytearray):
byte_str = bytes(byte_str)
r = from_bytes(byte_str).best()
encoding = r.encoding if r is not None else None
language = r.language if r is not None and r.language != "Unknown" else ""
confidence = 1.0 - r.chaos if r is not None else None
# Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig get stripped in the detection/normalization process
# but chardet does return 'utf-8-sig' and it is a valid codec name.
if r is not None and encoding == "utf_8" and r.bom:
encoding += "_sig"
return {
"encoding": encoding
if encoding not in CHARDET_CORRESPONDENCE
else CHARDET_CORRESPONDENCE[encoding],
"language": language,
"confidence": confidence,
}
class CharsetNormalizerMatch(CharsetMatch):
pass
class CharsetNormalizerMatches(CharsetMatches):
@staticmethod
def from_fp(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return from_fp(*args, **kwargs) # pragma: nocover
@staticmethod
def from_bytes(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return from_bytes(*args, **kwargs) # pragma: nocover
@staticmethod
def from_path(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return from_path(*args, **kwargs) # pragma: nocover
@staticmethod
def normalize(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return normalize(*args, **kwargs) # pragma: nocover
class CharsetDetector(CharsetNormalizerMatches):
pass
class CharsetDoctor(CharsetNormalizerMatches):
pass
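# A minimal usage sketch of the chardet-compatible entry point defined above
# (not part of the original file); the payload is illustrative only.
if __name__ == "__main__":
    result = detect("Bсеки човек има право на образование.".encode("utf_8"))
    print(result["encoding"], result["language"], result["confidence"])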
| 3,384
|
Python
|
.py
| 75
| 37.773333
| 120
| 0.672849
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,858
|
utils.py
|
rembo10_headphones/lib/charset_normalizer/utils.py
|
try:
import unicodedata2 as unicodedata
except ImportError:
import unicodedata # type: ignore[no-redef]
import importlib
import logging
from codecs import IncrementalDecoder
from encodings.aliases import aliases
from functools import lru_cache
from re import findall
from typing import List, Optional, Set, Tuple, Union
from _multibytecodec import MultibyteIncrementalDecoder # type: ignore
from .constant import (
ENCODING_MARKS,
IANA_SUPPORTED_SIMILAR,
RE_POSSIBLE_ENCODING_INDICATION,
UNICODE_RANGES_COMBINED,
UNICODE_SECONDARY_RANGE_KEYWORD,
UTF8_MAXIMAL_ALLOCATION,
)
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_accentuated(character: str) -> bool:
try:
description = unicodedata.name(character) # type: str
except ValueError:
return False
return (
"WITH GRAVE" in description
or "WITH ACUTE" in description
or "WITH CEDILLA" in description
or "WITH DIAERESIS" in description
or "WITH CIRCUMFLEX" in description
or "WITH TILDE" in description
)
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def remove_accent(character: str) -> str:
decomposed = unicodedata.decomposition(character) # type: str
if not decomposed:
return character
codes = decomposed.split(" ") # type: List[str]
return chr(int(codes[0], 16))
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def unicode_range(character: str) -> Optional[str]:
"""
Retrieve the Unicode range official name from a single character.
"""
character_ord = ord(character) # type: int
for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
if character_ord in ord_range:
return range_name
return None
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_latin(character: str) -> bool:
try:
description = unicodedata.name(character) # type: str
except ValueError:
return False
return "LATIN" in description
def is_ascii(character: str) -> bool:
try:
character.encode("ascii")
except UnicodeEncodeError:
return False
return True
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_punctuation(character: str) -> bool:
character_category = unicodedata.category(character) # type: str
if "P" in character_category:
return True
character_range = unicode_range(character) # type: Optional[str]
if character_range is None:
return False
return "Punctuation" in character_range
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_symbol(character: str) -> bool:
character_category = unicodedata.category(character) # type: str
if "S" in character_category or "N" in character_category:
return True
character_range = unicode_range(character) # type: Optional[str]
if character_range is None:
return False
return "Forms" in character_range
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_emoticon(character: str) -> bool:
character_range = unicode_range(character) # type: Optional[str]
if character_range is None:
return False
return "Emoticons" in character_range
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_separator(character: str) -> bool:
if character.isspace() or character in {"|", "+", ",", ";", "<", ">"}:
return True
character_category = unicodedata.category(character) # type: str
return "Z" in character_category
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_case_variable(character: str) -> bool:
return character.islower() != character.isupper()
def is_private_use_only(character: str) -> bool:
character_category = unicodedata.category(character) # type: str
return character_category == "Co"
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_cjk(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "CJK" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hiragana(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "HIRAGANA" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_katakana(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "KATAKANA" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hangul(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "HANGUL" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_thai(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "THAI" in character_name
@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
def is_unicode_range_secondary(range_name: str) -> bool:
return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
def any_specified_encoding(sequence: bytes, search_zone: int = 4096) -> Optional[str]:
"""
    Extract, using an ASCII-only decoder, any specified encoding declared in the first n bytes.
"""
if not isinstance(sequence, bytes):
raise TypeError
seq_len = len(sequence) # type: int
results = findall(
RE_POSSIBLE_ENCODING_INDICATION,
sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
) # type: List[str]
if len(results) == 0:
return None
for specified_encoding in results:
specified_encoding = specified_encoding.lower().replace("-", "_")
for encoding_alias, encoding_iana in aliases.items():
if encoding_alias == specified_encoding:
return encoding_iana
if encoding_iana == specified_encoding:
return encoding_iana
return None
@lru_cache(maxsize=128)
def is_multi_byte_encoding(name: str) -> bool:
"""
    Verify if a specific encoding is a multi-byte one based on its IANA name.
"""
return name in {
"utf_8",
"utf_8_sig",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_32",
"utf_32_le",
"utf_32_be",
"utf_7",
} or issubclass(
importlib.import_module("encodings.{}".format(name)).IncrementalDecoder, # type: ignore
MultibyteIncrementalDecoder,
)
def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
"""
    Identify and extract a SIG/BOM from the given sequence.
"""
for iana_encoding in ENCODING_MARKS:
marks = ENCODING_MARKS[iana_encoding] # type: Union[bytes, List[bytes]]
if isinstance(marks, bytes):
marks = [marks]
for mark in marks:
if sequence.startswith(mark):
return iana_encoding, mark
return None, b""
def should_strip_sig_or_bom(iana_encoding: str) -> bool:
return iana_encoding not in {"utf_16", "utf_32"}
def iana_name(cp_name: str, strict: bool = True) -> str:
cp_name = cp_name.lower().replace("-", "_")
for encoding_alias, encoding_iana in aliases.items():
if cp_name in [encoding_alias, encoding_iana]:
return encoding_iana
if strict:
raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name))
return cp_name
def range_scan(decoded_sequence: str) -> List[str]:
ranges = set() # type: Set[str]
for character in decoded_sequence:
character_range = unicode_range(character) # type: Optional[str]
if character_range is None:
continue
ranges.add(character_range)
return list(ranges)
def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
return 0.0
decoder_a = importlib.import_module("encodings.{}".format(iana_name_a)).IncrementalDecoder # type: ignore
decoder_b = importlib.import_module("encodings.{}".format(iana_name_b)).IncrementalDecoder # type: ignore
id_a = decoder_a(errors="ignore") # type: IncrementalDecoder
id_b = decoder_b(errors="ignore") # type: IncrementalDecoder
character_match_count = 0 # type: int
for i in range(255):
to_be_decoded = bytes([i]) # type: bytes
if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
character_match_count += 1
return character_match_count / 254
def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
"""
    Determine if two code pages are at least 80% similar. The IANA_SUPPORTED_SIMILAR dict was generated using
    the function cp_similarity.
"""
return (
iana_name_a in IANA_SUPPORTED_SIMILAR
and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
)
def set_logging_handler(
name: str = "charset_normalizer",
level: int = logging.INFO,
format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
) -> None:
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(format_string))
logger.addHandler(handler)
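# A short sketch (not part of the original file) exercising a few of the
# helpers defined above.
if __name__ == "__main__":
    print(identify_sig_or_bom(b"\xef\xbb\xbfhello"))  # ('utf_8', b'\xef\xbb\xbf')
    print(iana_name("UTF-8"))  # 'utf_8'
    print(is_multi_byte_encoding("utf_16"))  # True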
| 9,308
|
Python
|
.py
| 240
| 32.858333
| 110
| 0.686148
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,859
|
__init__.py
|
rembo10_headphones/lib/charset_normalizer/__init__.py
|
# -*- coding: utf_8 -*-
"""
Charset-Normalizer
~~~~~~~~~~~~~~
The Real First Universal Charset Detector.
A library that helps you read text from an unknown charset encoding.
Motivated by chardet, this package tries to resolve the issue by taking a new approach.
All IANA character set names for which the Python core library provides codecs are supported.
Basic usage:
>>> from charset_normalizer import from_bytes
>>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
>>> best_guess = results.best()
>>> str(best_guess)
'Bсеки човек има право на образование. Oбразованието!'
Other methods and usages are available - see the full documentation
at <https://github.com/Ousret/charset_normalizer>.
:copyright: (c) 2021 by Ahmed TAHRI
:license: MIT, see LICENSE for more details.
"""
import logging
from .api import from_bytes, from_fp, from_path, normalize
from .legacy import (
CharsetDetector,
CharsetDoctor,
CharsetNormalizerMatch,
CharsetNormalizerMatches,
detect,
)
from .models import CharsetMatch, CharsetMatches
from .utils import set_logging_handler
from .version import VERSION, __version__
__all__ = (
"from_fp",
"from_path",
"from_bytes",
"normalize",
"detect",
"CharsetMatch",
"CharsetMatches",
"CharsetNormalizerMatch",
"CharsetNormalizerMatches",
"CharsetDetector",
"CharsetDoctor",
"__version__",
"VERSION",
"set_logging_handler",
)
# Attach a NullHandler to the top level logger by default
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
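# Usage sketch (not part of the original file): because only a NullHandler is
# attached by default, callers opt in to library logging explicitly.
if __name__ == "__main__":
    set_logging_handler(level=logging.DEBUG)
    from_bytes("hello world".encode("utf_8")).best()  # detection steps are now logged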
| 1,790
|
Python
|
.py
| 50
| 31.18
| 99
| 0.740606
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,860
|
constant.py
|
rembo10_headphones/lib/charset_normalizer/constant.py
|
from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
from collections import OrderedDict
from encodings.aliases import aliases
from re import IGNORECASE, compile as re_compile
from typing import Dict, List, Set, Union
from .assets import FREQUENCIES
# Contains, for each eligible encoding, the SIG/BOM bytes (a single item or a list of items)
ENCODING_MARKS = OrderedDict(
[
("utf_8", BOM_UTF8),
(
"utf_7",
[
b"\x2b\x2f\x76\x38",
b"\x2b\x2f\x76\x39",
b"\x2b\x2f\x76\x2b",
b"\x2b\x2f\x76\x2f",
b"\x2b\x2f\x76\x38\x2d",
],
),
("gb18030", b"\x84\x31\x95\x33"),
("utf_32", [BOM_UTF32_BE, BOM_UTF32_LE]),
("utf_16", [BOM_UTF16_BE, BOM_UTF16_LE]),
]
) # type: Dict[str, Union[bytes, List[bytes]]]
TOO_SMALL_SEQUENCE = 32 # type: int
TOO_BIG_SEQUENCE = int(10e6) # type: int
UTF8_MAXIMAL_ALLOCATION = 1112064 # type: int
UNICODE_RANGES_COMBINED = {
"Control character": range(31 + 1),
"Basic Latin": range(32, 127 + 1),
"Latin-1 Supplement": range(128, 255 + 1),
"Latin Extended-A": range(256, 383 + 1),
"Latin Extended-B": range(384, 591 + 1),
"IPA Extensions": range(592, 687 + 1),
"Spacing Modifier Letters": range(688, 767 + 1),
"Combining Diacritical Marks": range(768, 879 + 1),
"Greek and Coptic": range(880, 1023 + 1),
"Cyrillic": range(1024, 1279 + 1),
"Cyrillic Supplement": range(1280, 1327 + 1),
"Armenian": range(1328, 1423 + 1),
"Hebrew": range(1424, 1535 + 1),
"Arabic": range(1536, 1791 + 1),
"Syriac": range(1792, 1871 + 1),
"Arabic Supplement": range(1872, 1919 + 1),
"Thaana": range(1920, 1983 + 1),
"NKo": range(1984, 2047 + 1),
"Samaritan": range(2048, 2111 + 1),
"Mandaic": range(2112, 2143 + 1),
"Syriac Supplement": range(2144, 2159 + 1),
"Arabic Extended-A": range(2208, 2303 + 1),
"Devanagari": range(2304, 2431 + 1),
"Bengali": range(2432, 2559 + 1),
"Gurmukhi": range(2560, 2687 + 1),
"Gujarati": range(2688, 2815 + 1),
"Oriya": range(2816, 2943 + 1),
"Tamil": range(2944, 3071 + 1),
"Telugu": range(3072, 3199 + 1),
"Kannada": range(3200, 3327 + 1),
"Malayalam": range(3328, 3455 + 1),
"Sinhala": range(3456, 3583 + 1),
"Thai": range(3584, 3711 + 1),
"Lao": range(3712, 3839 + 1),
"Tibetan": range(3840, 4095 + 1),
"Myanmar": range(4096, 4255 + 1),
"Georgian": range(4256, 4351 + 1),
"Hangul Jamo": range(4352, 4607 + 1),
"Ethiopic": range(4608, 4991 + 1),
"Ethiopic Supplement": range(4992, 5023 + 1),
"Cherokee": range(5024, 5119 + 1),
"Unified Canadian Aboriginal Syllabics": range(5120, 5759 + 1),
"Ogham": range(5760, 5791 + 1),
"Runic": range(5792, 5887 + 1),
"Tagalog": range(5888, 5919 + 1),
"Hanunoo": range(5920, 5951 + 1),
"Buhid": range(5952, 5983 + 1),
"Tagbanwa": range(5984, 6015 + 1),
"Khmer": range(6016, 6143 + 1),
"Mongolian": range(6144, 6319 + 1),
"Unified Canadian Aboriginal Syllabics Extended": range(6320, 6399 + 1),
"Limbu": range(6400, 6479 + 1),
"Tai Le": range(6480, 6527 + 1),
"New Tai Lue": range(6528, 6623 + 1),
"Khmer Symbols": range(6624, 6655 + 1),
"Buginese": range(6656, 6687 + 1),
"Tai Tham": range(6688, 6831 + 1),
"Combining Diacritical Marks Extended": range(6832, 6911 + 1),
"Balinese": range(6912, 7039 + 1),
"Sundanese": range(7040, 7103 + 1),
"Batak": range(7104, 7167 + 1),
"Lepcha": range(7168, 7247 + 1),
"Ol Chiki": range(7248, 7295 + 1),
"Cyrillic Extended C": range(7296, 7311 + 1),
"Sundanese Supplement": range(7360, 7375 + 1),
"Vedic Extensions": range(7376, 7423 + 1),
"Phonetic Extensions": range(7424, 7551 + 1),
"Phonetic Extensions Supplement": range(7552, 7615 + 1),
"Combining Diacritical Marks Supplement": range(7616, 7679 + 1),
"Latin Extended Additional": range(7680, 7935 + 1),
"Greek Extended": range(7936, 8191 + 1),
"General Punctuation": range(8192, 8303 + 1),
"Superscripts and Subscripts": range(8304, 8351 + 1),
"Currency Symbols": range(8352, 8399 + 1),
"Combining Diacritical Marks for Symbols": range(8400, 8447 + 1),
"Letterlike Symbols": range(8448, 8527 + 1),
"Number Forms": range(8528, 8591 + 1),
"Arrows": range(8592, 8703 + 1),
"Mathematical Operators": range(8704, 8959 + 1),
"Miscellaneous Technical": range(8960, 9215 + 1),
"Control Pictures": range(9216, 9279 + 1),
"Optical Character Recognition": range(9280, 9311 + 1),
"Enclosed Alphanumerics": range(9312, 9471 + 1),
"Box Drawing": range(9472, 9599 + 1),
"Block Elements": range(9600, 9631 + 1),
"Geometric Shapes": range(9632, 9727 + 1),
"Miscellaneous Symbols": range(9728, 9983 + 1),
"Dingbats": range(9984, 10175 + 1),
"Miscellaneous Mathematical Symbols-A": range(10176, 10223 + 1),
"Supplemental Arrows-A": range(10224, 10239 + 1),
"Braille Patterns": range(10240, 10495 + 1),
"Supplemental Arrows-B": range(10496, 10623 + 1),
"Miscellaneous Mathematical Symbols-B": range(10624, 10751 + 1),
"Supplemental Mathematical Operators": range(10752, 11007 + 1),
"Miscellaneous Symbols and Arrows": range(11008, 11263 + 1),
"Glagolitic": range(11264, 11359 + 1),
"Latin Extended-C": range(11360, 11391 + 1),
"Coptic": range(11392, 11519 + 1),
"Georgian Supplement": range(11520, 11567 + 1),
"Tifinagh": range(11568, 11647 + 1),
"Ethiopic Extended": range(11648, 11743 + 1),
"Cyrillic Extended-A": range(11744, 11775 + 1),
"Supplemental Punctuation": range(11776, 11903 + 1),
"CJK Radicals Supplement": range(11904, 12031 + 1),
"Kangxi Radicals": range(12032, 12255 + 1),
"Ideographic Description Characters": range(12272, 12287 + 1),
"CJK Symbols and Punctuation": range(12288, 12351 + 1),
"Hiragana": range(12352, 12447 + 1),
"Katakana": range(12448, 12543 + 1),
"Bopomofo": range(12544, 12591 + 1),
"Hangul Compatibility Jamo": range(12592, 12687 + 1),
"Kanbun": range(12688, 12703 + 1),
"Bopomofo Extended": range(12704, 12735 + 1),
"CJK Strokes": range(12736, 12783 + 1),
"Katakana Phonetic Extensions": range(12784, 12799 + 1),
"Enclosed CJK Letters and Months": range(12800, 13055 + 1),
"CJK Compatibility": range(13056, 13311 + 1),
"CJK Unified Ideographs Extension A": range(13312, 19903 + 1),
"Yijing Hexagram Symbols": range(19904, 19967 + 1),
"CJK Unified Ideographs": range(19968, 40959 + 1),
"Yi Syllables": range(40960, 42127 + 1),
"Yi Radicals": range(42128, 42191 + 1),
"Lisu": range(42192, 42239 + 1),
"Vai": range(42240, 42559 + 1),
"Cyrillic Extended-B": range(42560, 42655 + 1),
"Bamum": range(42656, 42751 + 1),
"Modifier Tone Letters": range(42752, 42783 + 1),
"Latin Extended-D": range(42784, 43007 + 1),
"Syloti Nagri": range(43008, 43055 + 1),
"Common Indic Number Forms": range(43056, 43071 + 1),
"Phags-pa": range(43072, 43135 + 1),
"Saurashtra": range(43136, 43231 + 1),
"Devanagari Extended": range(43232, 43263 + 1),
"Kayah Li": range(43264, 43311 + 1),
"Rejang": range(43312, 43359 + 1),
"Hangul Jamo Extended-A": range(43360, 43391 + 1),
"Javanese": range(43392, 43487 + 1),
"Myanmar Extended-B": range(43488, 43519 + 1),
"Cham": range(43520, 43615 + 1),
"Myanmar Extended-A": range(43616, 43647 + 1),
"Tai Viet": range(43648, 43743 + 1),
"Meetei Mayek Extensions": range(43744, 43775 + 1),
"Ethiopic Extended-A": range(43776, 43823 + 1),
"Latin Extended-E": range(43824, 43887 + 1),
"Cherokee Supplement": range(43888, 43967 + 1),
"Meetei Mayek": range(43968, 44031 + 1),
"Hangul Syllables": range(44032, 55215 + 1),
"Hangul Jamo Extended-B": range(55216, 55295 + 1),
"High Surrogates": range(55296, 56191 + 1),
"High Private Use Surrogates": range(56192, 56319 + 1),
"Low Surrogates": range(56320, 57343 + 1),
"Private Use Area": range(57344, 63743 + 1),
"CJK Compatibility Ideographs": range(63744, 64255 + 1),
"Alphabetic Presentation Forms": range(64256, 64335 + 1),
"Arabic Presentation Forms-A": range(64336, 65023 + 1),
"Variation Selectors": range(65024, 65039 + 1),
"Vertical Forms": range(65040, 65055 + 1),
"Combining Half Marks": range(65056, 65071 + 1),
"CJK Compatibility Forms": range(65072, 65103 + 1),
"Small Form Variants": range(65104, 65135 + 1),
"Arabic Presentation Forms-B": range(65136, 65279 + 1),
"Halfwidth and Fullwidth Forms": range(65280, 65519 + 1),
"Specials": range(65520, 65535 + 1),
"Linear B Syllabary": range(65536, 65663 + 1),
"Linear B Ideograms": range(65664, 65791 + 1),
"Aegean Numbers": range(65792, 65855 + 1),
"Ancient Greek Numbers": range(65856, 65935 + 1),
"Ancient Symbols": range(65936, 65999 + 1),
"Phaistos Disc": range(66000, 66047 + 1),
"Lycian": range(66176, 66207 + 1),
"Carian": range(66208, 66271 + 1),
"Coptic Epact Numbers": range(66272, 66303 + 1),
"Old Italic": range(66304, 66351 + 1),
"Gothic": range(66352, 66383 + 1),
"Old Permic": range(66384, 66431 + 1),
"Ugaritic": range(66432, 66463 + 1),
"Old Persian": range(66464, 66527 + 1),
"Deseret": range(66560, 66639 + 1),
"Shavian": range(66640, 66687 + 1),
"Osmanya": range(66688, 66735 + 1),
"Osage": range(66736, 66815 + 1),
"Elbasan": range(66816, 66863 + 1),
"Caucasian Albanian": range(66864, 66927 + 1),
"Linear A": range(67072, 67455 + 1),
"Cypriot Syllabary": range(67584, 67647 + 1),
"Imperial Aramaic": range(67648, 67679 + 1),
"Palmyrene": range(67680, 67711 + 1),
"Nabataean": range(67712, 67759 + 1),
"Hatran": range(67808, 67839 + 1),
"Phoenician": range(67840, 67871 + 1),
"Lydian": range(67872, 67903 + 1),
"Meroitic Hieroglyphs": range(67968, 67999 + 1),
"Meroitic Cursive": range(68000, 68095 + 1),
"Kharoshthi": range(68096, 68191 + 1),
"Old South Arabian": range(68192, 68223 + 1),
"Old North Arabian": range(68224, 68255 + 1),
"Manichaean": range(68288, 68351 + 1),
"Avestan": range(68352, 68415 + 1),
"Inscriptional Parthian": range(68416, 68447 + 1),
"Inscriptional Pahlavi": range(68448, 68479 + 1),
"Psalter Pahlavi": range(68480, 68527 + 1),
"Old Turkic": range(68608, 68687 + 1),
"Old Hungarian": range(68736, 68863 + 1),
"Rumi Numeral Symbols": range(69216, 69247 + 1),
"Brahmi": range(69632, 69759 + 1),
"Kaithi": range(69760, 69839 + 1),
"Sora Sompeng": range(69840, 69887 + 1),
"Chakma": range(69888, 69967 + 1),
"Mahajani": range(69968, 70015 + 1),
"Sharada": range(70016, 70111 + 1),
"Sinhala Archaic Numbers": range(70112, 70143 + 1),
"Khojki": range(70144, 70223 + 1),
"Multani": range(70272, 70319 + 1),
"Khudawadi": range(70320, 70399 + 1),
"Grantha": range(70400, 70527 + 1),
"Newa": range(70656, 70783 + 1),
"Tirhuta": range(70784, 70879 + 1),
"Siddham": range(71040, 71167 + 1),
"Modi": range(71168, 71263 + 1),
"Mongolian Supplement": range(71264, 71295 + 1),
"Takri": range(71296, 71375 + 1),
"Ahom": range(71424, 71487 + 1),
"Warang Citi": range(71840, 71935 + 1),
"Zanabazar Square": range(72192, 72271 + 1),
"Soyombo": range(72272, 72367 + 1),
"Pau Cin Hau": range(72384, 72447 + 1),
"Bhaiksuki": range(72704, 72815 + 1),
"Marchen": range(72816, 72895 + 1),
"Masaram Gondi": range(72960, 73055 + 1),
"Cuneiform": range(73728, 74751 + 1),
"Cuneiform Numbers and Punctuation": range(74752, 74879 + 1),
"Early Dynastic Cuneiform": range(74880, 75087 + 1),
"Egyptian Hieroglyphs": range(77824, 78895 + 1),
"Anatolian Hieroglyphs": range(82944, 83583 + 1),
"Bamum Supplement": range(92160, 92735 + 1),
"Mro": range(92736, 92783 + 1),
"Bassa Vah": range(92880, 92927 + 1),
"Pahawh Hmong": range(92928, 93071 + 1),
"Miao": range(93952, 94111 + 1),
"Ideographic Symbols and Punctuation": range(94176, 94207 + 1),
"Tangut": range(94208, 100351 + 1),
"Tangut Components": range(100352, 101119 + 1),
"Kana Supplement": range(110592, 110847 + 1),
"Kana Extended-A": range(110848, 110895 + 1),
"Nushu": range(110960, 111359 + 1),
"Duployan": range(113664, 113823 + 1),
"Shorthand Format Controls": range(113824, 113839 + 1),
"Byzantine Musical Symbols": range(118784, 119039 + 1),
"Musical Symbols": range(119040, 119295 + 1),
"Ancient Greek Musical Notation": range(119296, 119375 + 1),
"Tai Xuan Jing Symbols": range(119552, 119647 + 1),
"Counting Rod Numerals": range(119648, 119679 + 1),
"Mathematical Alphanumeric Symbols": range(119808, 120831 + 1),
"Sutton SignWriting": range(120832, 121519 + 1),
"Glagolitic Supplement": range(122880, 122927 + 1),
"Mende Kikakui": range(124928, 125151 + 1),
"Adlam": range(125184, 125279 + 1),
"Arabic Mathematical Alphabetic Symbols": range(126464, 126719 + 1),
"Mahjong Tiles": range(126976, 127023 + 1),
"Domino Tiles": range(127024, 127135 + 1),
"Playing Cards": range(127136, 127231 + 1),
"Enclosed Alphanumeric Supplement": range(127232, 127487 + 1),
"Enclosed Ideographic Supplement": range(127488, 127743 + 1),
"Miscellaneous Symbols and Pictographs": range(127744, 128511 + 1),
"Emoticons range(Emoji)": range(128512, 128591 + 1),
"Ornamental Dingbats": range(128592, 128639 + 1),
"Transport and Map Symbols": range(128640, 128767 + 1),
"Alchemical Symbols": range(128768, 128895 + 1),
"Geometric Shapes Extended": range(128896, 129023 + 1),
"Supplemental Arrows-C": range(129024, 129279 + 1),
"Supplemental Symbols and Pictographs": range(129280, 129535 + 1),
"CJK Unified Ideographs Extension B": range(131072, 173791 + 1),
"CJK Unified Ideographs Extension C": range(173824, 177983 + 1),
"CJK Unified Ideographs Extension D": range(177984, 178207 + 1),
"CJK Unified Ideographs Extension E": range(178208, 183983 + 1),
"CJK Unified Ideographs Extension F": range(183984, 191471 + 1),
"CJK Compatibility Ideographs Supplement": range(194560, 195103 + 1),
"Tags": range(917504, 917631 + 1),
"Variation Selectors Supplement": range(917760, 917999 + 1),
} # type: Dict[str, range]
UNICODE_SECONDARY_RANGE_KEYWORD = [
"Supplement",
"Extended",
"Extensions",
"Modifier",
"Marks",
"Punctuation",
"Symbols",
"Forms",
"Operators",
"Miscellaneous",
"Drawing",
"Block",
"Shapes",
"Supplemental",
"Tags",
] # type: List[str]
RE_POSSIBLE_ENCODING_INDICATION = re_compile(
r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
IGNORECASE,
)
IANA_SUPPORTED = sorted(
filter(
lambda x: x.endswith("_codec") is False
and x not in {"rot_13", "tactis", "mbcs"},
list(set(aliases.values())),
)
) # type: List[str]
IANA_SUPPORTED_COUNT = len(IANA_SUPPORTED) # type: int
# Pre-computed code pages that are similar, as determined by the function cp_similarity.
IANA_SUPPORTED_SIMILAR = {
"cp037": ["cp1026", "cp1140", "cp273", "cp500"],
"cp1026": ["cp037", "cp1140", "cp273", "cp500"],
"cp1125": ["cp866"],
"cp1140": ["cp037", "cp1026", "cp273", "cp500"],
"cp1250": ["iso8859_2"],
"cp1251": ["kz1048", "ptcp154"],
"cp1252": ["iso8859_15", "iso8859_9", "latin_1"],
"cp1253": ["iso8859_7"],
"cp1254": ["iso8859_15", "iso8859_9", "latin_1"],
"cp1257": ["iso8859_13"],
"cp273": ["cp037", "cp1026", "cp1140", "cp500"],
"cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"],
"cp500": ["cp037", "cp1026", "cp1140", "cp273"],
"cp850": ["cp437", "cp857", "cp858", "cp865"],
"cp857": ["cp850", "cp858", "cp865"],
"cp858": ["cp437", "cp850", "cp857", "cp865"],
"cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"],
"cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"],
"cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"],
"cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"],
"cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"],
"cp866": ["cp1125"],
"iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"],
"iso8859_11": ["tis_620"],
"iso8859_13": ["cp1257"],
"iso8859_14": [
"iso8859_10",
"iso8859_15",
"iso8859_16",
"iso8859_3",
"iso8859_9",
"latin_1",
],
"iso8859_15": [
"cp1252",
"cp1254",
"iso8859_10",
"iso8859_14",
"iso8859_16",
"iso8859_3",
"iso8859_9",
"latin_1",
],
"iso8859_16": [
"iso8859_14",
"iso8859_15",
"iso8859_2",
"iso8859_3",
"iso8859_9",
"latin_1",
],
"iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"],
"iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"],
"iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"],
"iso8859_7": ["cp1253"],
"iso8859_9": [
"cp1252",
"cp1254",
"cp1258",
"iso8859_10",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_3",
"iso8859_4",
"latin_1",
],
"kz1048": ["cp1251", "ptcp154"],
"latin_1": [
"cp1252",
"cp1254",
"cp1258",
"iso8859_10",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_3",
"iso8859_4",
"iso8859_9",
],
"mac_iceland": ["mac_roman", "mac_turkish"],
"mac_roman": ["mac_iceland", "mac_turkish"],
"mac_turkish": ["mac_iceland", "mac_roman"],
"ptcp154": ["cp1251", "kz1048"],
"tis_620": ["iso8859_11"],
} # type: Dict[str, List[str]]
CHARDET_CORRESPONDENCE = {
"iso2022_kr": "ISO-2022-KR",
"iso2022_jp": "ISO-2022-JP",
"euc_kr": "EUC-KR",
"tis_620": "TIS-620",
"utf_32": "UTF-32",
"euc_jp": "EUC-JP",
"koi8_r": "KOI8-R",
"iso8859_1": "ISO-8859-1",
"iso8859_2": "ISO-8859-2",
"iso8859_5": "ISO-8859-5",
"iso8859_6": "ISO-8859-6",
"iso8859_7": "ISO-8859-7",
"iso8859_8": "ISO-8859-8",
"utf_16": "UTF-16",
"cp855": "IBM855",
"mac_cyrillic": "MacCyrillic",
"gb2312": "GB2312",
"gb18030": "GB18030",
"cp932": "CP932",
"cp866": "IBM866",
"utf_8": "utf-8",
"utf_8_sig": "UTF-8-SIG",
"shift_jis": "SHIFT_JIS",
"big5": "Big5",
"cp1250": "windows-1250",
"cp1251": "windows-1251",
"cp1252": "Windows-1252",
"cp1253": "windows-1253",
"cp1255": "windows-1255",
"cp1256": "windows-1256",
"cp1254": "Windows-1254",
"cp949": "CP949",
} # type: Dict[str, str]
COMMON_SAFE_ASCII_CHARACTERS = {
"<",
">",
"=",
":",
"/",
"&",
";",
"{",
"}",
"[",
"]",
",",
"|",
'"',
"-",
} # type: Set[str]
KO_NAMES = {"johab", "cp949", "euc_kr"} # type: Set[str]
ZH_NAMES = {"big5", "cp950", "big5hkscs", "hz"} # type: Set[str]
NOT_PRINTABLE_PATTERN = re_compile(r"[0-9\W\n\r\t]+")
LANGUAGE_SUPPORTED_COUNT = len(FREQUENCIES) # type: int
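# Illustrative sketch (not part of the original file): how ENCODING_MARKS is
# typically consumed, mirroring identify_sig_or_bom in charset_normalizer.utils.
if __name__ == "__main__":
    payload = b"\xff\xfeh\x00i\x00"  # UTF-16-LE BOM followed by "hi"
    for iana_encoding, marks in ENCODING_MARKS.items():
        for mark in [marks] if isinstance(marks, bytes) else marks:
            if payload.startswith(mark):
                print(iana_encoding, mark)  # utf_16 b'\xff\xfe'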
| 19,396
|
Python
|
.py
| 481
| 34.968815
| 102
| 0.602456
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,861
|
version.py
|
rembo10_headphones/lib/charset_normalizer/version.py
|
"""
Expose version
"""
__version__ = "2.0.10"
VERSION = __version__.split(".")
| 80
|
Python
|
.py
| 5
| 14.8
| 32
| 0.581081
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,862
|
__init__.py
|
rembo10_headphones/lib/charset_normalizer/assets/__init__.py
|
# -*- coding: utf_8 -*-
from collections import OrderedDict
FREQUENCIES = OrderedDict(
[
(
"English",
[
"e",
"a",
"t",
"i",
"o",
"n",
"s",
"r",
"h",
"l",
"d",
"c",
"u",
"m",
"f",
"p",
"g",
"w",
"y",
"b",
"v",
"k",
"x",
"j",
"z",
"q",
],
),
(
"German",
[
"e",
"n",
"i",
"r",
"s",
"t",
"a",
"d",
"h",
"u",
"l",
"g",
"o",
"c",
"m",
"b",
"f",
"k",
"w",
"z",
"p",
"v",
"ü",
"ä",
"ö",
"j",
],
),
(
"French",
[
"e",
"a",
"s",
"n",
"i",
"t",
"r",
"l",
"u",
"o",
"d",
"c",
"p",
"m",
"é",
"v",
"g",
"f",
"b",
"h",
"q",
"à",
"x",
"è",
"y",
"j",
],
),
(
"Dutch",
[
"e",
"n",
"a",
"i",
"r",
"t",
"o",
"d",
"s",
"l",
"g",
"h",
"v",
"m",
"u",
"k",
"c",
"p",
"b",
"w",
"j",
"z",
"f",
"y",
"x",
"ë",
],
),
(
"Italian",
[
"e",
"i",
"a",
"o",
"n",
"l",
"t",
"r",
"s",
"c",
"d",
"u",
"p",
"m",
"g",
"v",
"f",
"b",
"z",
"h",
"q",
"è",
"à",
"k",
"y",
"ò",
],
),
(
"Polish",
[
"a",
"i",
"o",
"e",
"n",
"r",
"z",
"w",
"s",
"c",
"t",
"k",
"y",
"d",
"p",
"m",
"u",
"l",
"j",
"ł",
"g",
"b",
"h",
"ą",
"ę",
"ó",
],
),
(
"Spanish",
[
"e",
"a",
"o",
"n",
"s",
"r",
"i",
"l",
"d",
"t",
"c",
"u",
"m",
"p",
"b",
"g",
"v",
"f",
"y",
"ó",
"h",
"q",
"í",
"j",
"z",
"á",
],
),
(
"Russian",
[
"о",
"а",
"е",
"и",
"н",
"с",
"т",
"р",
"в",
"л",
"к",
"м",
"д",
"п",
"у",
"г",
"я",
"ы",
"з",
"б",
"й",
"ь",
"ч",
"х",
"ж",
"ц",
],
),
(
"Japanese",
[
"の",
"に",
"る",
"た",
"は",
"ー",
"と",
"し",
"を",
"で",
"て",
"が",
"い",
"ン",
"れ",
"な",
"年",
"ス",
"っ",
"ル",
"か",
"ら",
"あ",
"さ",
"も",
"り",
],
),
(
"Portuguese",
[
"a",
"e",
"o",
"s",
"i",
"r",
"d",
"n",
"t",
"m",
"u",
"c",
"l",
"p",
"g",
"v",
"b",
"f",
"h",
"ã",
"q",
"é",
"ç",
"á",
"z",
"í",
],
),
(
"Swedish",
[
"e",
"a",
"n",
"r",
"t",
"s",
"i",
"l",
"d",
"o",
"m",
"k",
"g",
"v",
"h",
"f",
"u",
"p",
"ä",
"c",
"b",
"ö",
"å",
"y",
"j",
"x",
],
),
(
"Chinese",
[
"的",
"一",
"是",
"不",
"了",
"在",
"人",
"有",
"我",
"他",
"这",
"个",
"们",
"中",
"来",
"上",
"大",
"为",
"和",
"国",
"地",
"到",
"以",
"说",
"时",
"要",
"就",
"出",
"会",
],
),
(
"Ukrainian",
[
"о",
"а",
"н",
"і",
"и",
"р",
"в",
"т",
"е",
"с",
"к",
"л",
"у",
"д",
"м",
"п",
"з",
"я",
"ь",
"б",
"г",
"й",
"ч",
"х",
"ц",
"ї",
],
),
(
"Norwegian",
[
"e",
"r",
"n",
"t",
"a",
"s",
"i",
"o",
"l",
"d",
"g",
"k",
"m",
"v",
"f",
"p",
"u",
"b",
"h",
"å",
"y",
"j",
"ø",
"c",
"æ",
"w",
],
),
(
"Finnish",
[
"a",
"i",
"n",
"t",
"e",
"s",
"l",
"o",
"u",
"k",
"ä",
"m",
"r",
"v",
"j",
"h",
"p",
"y",
"d",
"ö",
"g",
"c",
"b",
"f",
"w",
"z",
],
),
(
"Vietnamese",
[
"n",
"h",
"t",
"i",
"c",
"g",
"a",
"o",
"u",
"m",
"l",
"r",
"à",
"đ",
"s",
"e",
"v",
"p",
"b",
"y",
"ư",
"d",
"á",
"k",
"ộ",
"ế",
],
),
(
"Czech",
[
"o",
"e",
"a",
"n",
"t",
"s",
"i",
"l",
"v",
"r",
"k",
"d",
"u",
"m",
"p",
"í",
"c",
"h",
"z",
"á",
"y",
"j",
"b",
"ě",
"é",
"ř",
],
),
(
"Hungarian",
[
"e",
"a",
"t",
"l",
"s",
"n",
"k",
"r",
"i",
"o",
"z",
"á",
"é",
"g",
"m",
"b",
"y",
"v",
"d",
"h",
"u",
"p",
"j",
"ö",
"f",
"c",
],
),
(
"Korean",
[
"이",
"다",
"에",
"의",
"는",
"로",
"하",
"을",
"가",
"고",
"지",
"서",
"한",
"은",
"기",
"으",
"년",
"대",
"사",
"시",
"를",
"리",
"도",
"인",
"스",
"일",
],
),
(
"Indonesian",
[
"a",
"n",
"e",
"i",
"r",
"t",
"u",
"s",
"d",
"k",
"m",
"l",
"g",
"p",
"b",
"o",
"h",
"y",
"j",
"c",
"w",
"f",
"v",
"z",
"x",
"q",
],
),
(
"Turkish",
[
"a",
"e",
"i",
"n",
"r",
"l",
"ı",
"k",
"d",
"t",
"s",
"m",
"y",
"u",
"o",
"b",
"ü",
"ş",
"v",
"g",
"z",
"h",
"c",
"p",
"ç",
"ğ",
],
),
(
"Romanian",
[
"e",
"i",
"a",
"r",
"n",
"t",
"u",
"l",
"o",
"c",
"s",
"d",
"p",
"m",
"ă",
"f",
"v",
"î",
"g",
"b",
"ș",
"ț",
"z",
"h",
"â",
"j",
],
),
(
"Farsi",
[
"ا",
"ی",
"ر",
"د",
"ن",
"ه",
"و",
"م",
"ت",
"ب",
"س",
"ل",
"ک",
"ش",
"ز",
"ف",
"گ",
"ع",
"خ",
"ق",
"ج",
"آ",
"پ",
"ح",
"ط",
"ص",
],
),
(
"Arabic",
[
"ا",
"ل",
"ي",
"م",
"و",
"ن",
"ر",
"ت",
"ب",
"ة",
"ع",
"د",
"س",
"ف",
"ه",
"ك",
"ق",
"أ",
"ح",
"ج",
"ش",
"ط",
"ص",
"ى",
"خ",
"إ",
],
),
(
"Danish",
[
"e",
"r",
"n",
"t",
"a",
"i",
"s",
"d",
"l",
"o",
"g",
"m",
"k",
"f",
"v",
"u",
"b",
"h",
"p",
"å",
"y",
"ø",
"æ",
"c",
"j",
"w",
],
),
(
"Serbian",
[
"а",
"и",
"о",
"е",
"н",
"р",
"с",
"у",
"т",
"к",
"ј",
"в",
"д",
"м",
"п",
"л",
"г",
"з",
"б",
"a",
"i",
"e",
"o",
"n",
"ц",
"ш",
],
),
(
"Lithuanian",
[
"i",
"a",
"s",
"o",
"r",
"e",
"t",
"n",
"u",
"k",
"m",
"l",
"p",
"v",
"d",
"j",
"g",
"ė",
"b",
"y",
"ų",
"š",
"ž",
"c",
"ą",
"į",
],
),
(
"Slovene",
[
"e",
"a",
"i",
"o",
"n",
"r",
"s",
"l",
"t",
"j",
"v",
"k",
"d",
"p",
"m",
"u",
"z",
"b",
"g",
"h",
"č",
"c",
"š",
"ž",
"f",
"y",
],
),
(
"Slovak",
[
"o",
"a",
"e",
"n",
"i",
"r",
"v",
"t",
"s",
"l",
"k",
"d",
"m",
"p",
"u",
"c",
"h",
"j",
"b",
"z",
"á",
"y",
"ý",
"í",
"č",
"é",
],
),
(
"Hebrew",
[
"י",
"ו",
"ה",
"ל",
"ר",
"ב",
"ת",
"מ",
"א",
"ש",
"נ",
"ע",
"ם",
"ד",
"ק",
"ח",
"פ",
"ס",
"כ",
"ג",
"ט",
"צ",
"ן",
"ז",
"ך",
],
),
(
"Bulgarian",
[
"а",
"и",
"о",
"е",
"н",
"т",
"р",
"с",
"в",
"л",
"к",
"д",
"п",
"м",
"з",
"г",
"я",
"ъ",
"у",
"б",
"ч",
"ц",
"й",
"ж",
"щ",
"х",
],
),
(
"Croatian",
[
"a",
"i",
"o",
"e",
"n",
"r",
"j",
"s",
"t",
"u",
"k",
"l",
"v",
"d",
"m",
"p",
"g",
"z",
"b",
"c",
"č",
"h",
"š",
"ž",
"ć",
"f",
],
),
(
"Hindi",
[
"क",
"र",
"स",
"न",
"त",
"म",
"ह",
"प",
"य",
"ल",
"व",
"ज",
"द",
"ग",
"ब",
"श",
"ट",
"अ",
"ए",
"थ",
"भ",
"ड",
"च",
"ध",
"ष",
"इ",
],
),
(
"Estonian",
[
"a",
"i",
"e",
"s",
"t",
"l",
"u",
"n",
"o",
"k",
"r",
"d",
"m",
"v",
"g",
"p",
"j",
"h",
"ä",
"b",
"õ",
"ü",
"f",
"c",
"ö",
"y",
],
),
(
"Simple English",
[
"e",
"a",
"t",
"i",
"o",
"n",
"s",
"r",
"h",
"l",
"d",
"c",
"m",
"u",
"f",
"p",
"g",
"w",
"b",
"y",
"v",
"k",
"j",
"x",
"z",
"q",
],
),
(
"Thai",
[
"า",
"น",
"ร",
"อ",
"ก",
"เ",
"ง",
"ม",
"ย",
"ล",
"ว",
"ด",
"ท",
"ส",
"ต",
"ะ",
"ป",
"บ",
"ค",
"ห",
"แ",
"จ",
"พ",
"ช",
"ข",
"ใ",
],
),
(
"Greek",
[
"α",
"τ",
"ο",
"ι",
"ε",
"ν",
"ρ",
"σ",
"κ",
"η",
"π",
"ς",
"υ",
"μ",
"λ",
"ί",
"ό",
"ά",
"γ",
"έ",
"δ",
"ή",
"ω",
"χ",
"θ",
"ύ",
],
),
(
"Tamil",
[
"க",
"த",
"ப",
"ட",
"ர",
"ம",
"ல",
"ன",
"வ",
"ற",
"ய",
"ள",
"ச",
"ந",
"இ",
"ண",
"அ",
"ஆ",
"ழ",
"ங",
"எ",
"உ",
"ஒ",
"ஸ",
],
),
(
"Classical Chinese",
[
"之",
"年",
"為",
"也",
"以",
"一",
"人",
"其",
"者",
"國",
"有",
"二",
"十",
"於",
"曰",
"三",
"不",
"大",
"而",
"子",
"中",
"五",
"四",
],
),
(
"Kazakh",
[
"а",
"ы",
"е",
"н",
"т",
"р",
"л",
"і",
"д",
"с",
"м",
"қ",
"к",
"о",
"б",
"и",
"у",
"ғ",
"ж",
"ң",
"з",
"ш",
"й",
"п",
"г",
"ө",
],
),
]
)
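# Illustrative sketch (not part of the original file): a naive way to score a
# piece of text against the frequency tables above. The real coherence logic
# (coherence_ratio, as used in api.py) is considerably more involved.
if __name__ == "__main__":
    sample = "the quick brown fox jumps over the lazy dog"
    ranking = sorted(
        FREQUENCIES,
        key=lambda lang: -sum(sample.count(ch) for ch in FREQUENCIES[lang][:10]),
    )
    print(ranking[:3])  # English-like languages should rank near the top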
| 25,485
|
Python
|
.py
| 1,243
| 3.93564
| 35
| 0.059223
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,863
|
normalizer.py
|
rembo10_headphones/lib/charset_normalizer/cli/normalizer.py
|
import argparse
import sys
from json import dumps
from os.path import abspath
from platform import python_version
from typing import List
from charset_normalizer import from_fp
from charset_normalizer.models import CliDetectionResult
from charset_normalizer.version import __version__
def query_yes_no(question: str, default: str = "yes") -> bool:
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
"""
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == "":
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
def cli_detect(argv: List[str] = None) -> int:
"""
CLI assistant using ARGV and ArgumentParser
:param argv:
    :return: 0 if everything is fine; anything else signals trouble
"""
parser = argparse.ArgumentParser(
description="The Real First Universal Charset Detector. "
"Discover originating encoding used on text file. "
"Normalize text to unicode."
)
parser.add_argument(
"files", type=argparse.FileType("rb"), nargs="+", help="File(s) to be analysed"
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
default=False,
dest="verbose",
help="Display complementary information about file if any. "
"Stdout will contain logs about the detection process.",
)
parser.add_argument(
"-a",
"--with-alternative",
action="store_true",
default=False,
dest="alternatives",
help="Output complementary possibilities if any. Top-level JSON WILL be a list.",
)
parser.add_argument(
"-n",
"--normalize",
action="store_true",
default=False,
dest="normalize",
help="Permit to normalize input file. If not set, program does not write anything.",
)
parser.add_argument(
"-m",
"--minimal",
action="store_true",
default=False,
dest="minimal",
help="Only output the charset detected to STDOUT. Disabling JSON output.",
)
parser.add_argument(
"-r",
"--replace",
action="store_true",
default=False,
dest="replace",
help="Replace file when trying to normalize it instead of creating a new one.",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
dest="force",
help="Replace file without asking if you are sure, use this flag with caution.",
)
parser.add_argument(
"-t",
"--threshold",
action="store",
default=0.1,
type=float,
dest="threshold",
help="Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.",
)
parser.add_argument(
"--version",
action="version",
version="Charset-Normalizer {} - Python {}".format(
__version__, python_version()
),
help="Show version information and exit.",
)
args = parser.parse_args(argv)
if args.replace is True and args.normalize is False:
print("Use --replace in addition of --normalize only.", file=sys.stderr)
return 1
if args.force is True and args.replace is False:
print("Use --force in addition of --replace only.", file=sys.stderr)
return 1
if args.threshold < 0.0 or args.threshold > 1.0:
print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr)
return 1
x_ = []
for my_file in args.files:
matches = from_fp(my_file, threshold=args.threshold, explain=args.verbose)
best_guess = matches.best()
if best_guess is None:
print(
'Unable to identify originating encoding for "{}". {}'.format(
my_file.name,
"Maybe try increasing maximum amount of chaos."
if args.threshold < 1.0
else "",
),
file=sys.stderr,
)
x_.append(
CliDetectionResult(
abspath(my_file.name),
None,
[],
[],
"Unknown",
[],
False,
1.0,
0.0,
None,
True,
)
)
else:
x_.append(
CliDetectionResult(
abspath(my_file.name),
best_guess.encoding,
best_guess.encoding_aliases,
[
cp
for cp in best_guess.could_be_from_charset
if cp != best_guess.encoding
],
best_guess.language,
best_guess.alphabets,
best_guess.bom,
best_guess.percent_chaos,
best_guess.percent_coherence,
None,
True,
)
)
if len(matches) > 1 and args.alternatives:
for el in matches:
if el != best_guess:
x_.append(
CliDetectionResult(
abspath(my_file.name),
el.encoding,
el.encoding_aliases,
[
cp
for cp in el.could_be_from_charset
if cp != el.encoding
],
el.language,
el.alphabets,
el.bom,
el.percent_chaos,
el.percent_coherence,
None,
False,
)
)
if args.normalize is True:
if best_guess.encoding.startswith("utf") is True:
print(
'"{}" file does not need to be normalized, as it already came from unicode.'.format(
my_file.name
),
file=sys.stderr,
)
if my_file.closed is False:
my_file.close()
continue
o_ = my_file.name.split(".") # type: List[str]
if args.replace is False:
o_.insert(-1, best_guess.encoding)
if my_file.closed is False:
my_file.close()
elif (
args.force is False
and query_yes_no(
                    'Are you sure you want to normalize "{}" by replacing it?'.format(
my_file.name
),
"no",
)
is False
):
if my_file.closed is False:
my_file.close()
continue
try:
x_[0].unicode_path = abspath("./{}".format(".".join(o_)))
with open(x_[0].unicode_path, "w", encoding="utf-8") as fp:
fp.write(str(best_guess))
except IOError as e:
print(str(e), file=sys.stderr)
if my_file.closed is False:
my_file.close()
return 2
if my_file.closed is False:
my_file.close()
if args.minimal is False:
print(
dumps(
[el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__,
ensure_ascii=True,
indent=4,
)
)
else:
for my_file in args.files:
print(
", ".join(
[
el.encoding or "undefined"
for el in x_
if el.path == abspath(my_file.name)
]
)
)
return 0
if __name__ == "__main__":
cli_detect()
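# Usage sketch (not part of the original file); the file path below is
# hypothetical. cli_detect also accepts an argv list programmatically:
#
#     from charset_normalizer.cli.normalizer import cli_detect
#     exit_code = cli_detect(["./legacy.txt", "--verbose"])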
| 9,364
|
Python
|
.py
| 259
| 22.15444
| 111
| 0.470134
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,864
|
functools.py
|
rembo10_headphones/lib/jaraco/functools.py
|
import functools
import time
import inspect
import collections
import types
import itertools
import more_itertools
from typing import Callable, TypeVar
CallableT = TypeVar("CallableT", bound=Callable[..., object])
def compose(*funcs):
"""
Compose any number of unary functions into a single unary function.
>>> import textwrap
>>> expected = str.strip(textwrap.dedent(compose.__doc__))
>>> strip_and_dedent = compose(str.strip, textwrap.dedent)
>>> strip_and_dedent(compose.__doc__) == expected
True
Compose also allows the innermost function to take arbitrary arguments.
>>> round_three = lambda x: round(x, ndigits=3)
>>> f = compose(round_three, int.__truediv__)
>>> [f(3*x, x+1) for x in range(1,10)]
[1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
"""
def compose_two(f1, f2):
return lambda *args, **kwargs: f1(f2(*args, **kwargs))
return functools.reduce(compose_two, funcs)
def method_caller(method_name, *args, **kwargs):
"""
Return a function that will call a named method on the
target object with optional positional and keyword
arguments.
>>> lower = method_caller('lower')
>>> lower('MyString')
'mystring'
"""
def call_method(target):
func = getattr(target, method_name)
return func(*args, **kwargs)
return call_method
def once(func):
"""
Decorate func so it's only ever called the first time.
This decorator can ensure that an expensive or non-idempotent function
will not be expensive on subsequent calls and is idempotent.
>>> add_three = once(lambda a: a+3)
>>> add_three(3)
6
>>> add_three(9)
6
>>> add_three('12')
6
To reset the stored value, simply clear the property ``saved_result``.
>>> del add_three.saved_result
>>> add_three(9)
12
>>> add_three(8)
12
Or invoke 'reset()' on it.
>>> add_three.reset()
>>> add_three(-3)
0
>>> add_three(0)
0
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(wrapper, 'saved_result'):
wrapper.saved_result = func(*args, **kwargs)
return wrapper.saved_result
wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
return wrapper
def method_cache(
method: CallableT,
cache_wrapper: Callable[
[CallableT], CallableT
] = functools.lru_cache(), # type: ignore[assignment]
) -> CallableT:
"""
Wrap lru_cache to support storing the cache data in the object instances.
Abstracts the common paradigm where the method explicitly saves an
underscore-prefixed protected property on first call and returns that
subsequently.
>>> class MyClass:
... calls = 0
...
... @method_cache
... def method(self, value):
... self.calls += 1
... return value
>>> a = MyClass()
>>> a.method(3)
3
>>> for x in range(75):
... res = a.method(x)
>>> a.calls
75
Note that the apparent behavior will be exactly like that of lru_cache
except that the cache is stored on each instance, so values in one
instance will not flush values from another, and when an instance is
deleted, so are the cached values for that instance.
>>> b = MyClass()
>>> for x in range(35):
... res = b.method(x)
>>> b.calls
35
>>> a.method(0)
0
>>> a.calls
75
Note that if method had been decorated with ``functools.lru_cache()``,
a.calls would have been 76 (due to the cached value of 0 having been
flushed by the 'b' instance).
Clear the cache with ``.cache_clear()``
>>> a.method.cache_clear()
Same for a method that hasn't yet been called.
>>> c = MyClass()
>>> c.method.cache_clear()
Another cache wrapper may be supplied:
>>> cache = functools.lru_cache(maxsize=2)
>>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
>>> a = MyClass()
>>> a.method2()
3
Caution - do not subsequently wrap the method with another decorator, such
as ``@property``, which changes the semantics of the function.
See also
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
for another implementation and additional justification.
"""
def wrapper(self: object, *args: object, **kwargs: object) -> object:
# it's the first call, replace the method with a cached, bound method
bound_method: CallableT = types.MethodType( # type: ignore[assignment]
method, self
)
cached_method = cache_wrapper(bound_method)
setattr(self, method.__name__, cached_method)
return cached_method(*args, **kwargs)
# Support cache clear even before cache has been created.
wrapper.cache_clear = lambda: None # type: ignore[attr-defined]
return ( # type: ignore[return-value]
_special_method_cache(method, cache_wrapper) or wrapper
)
def _special_method_cache(method, cache_wrapper):
"""
Because Python treats special methods differently, it's not
possible to use instance attributes to implement the cached
methods.
Instead, install the wrapper method under a different name
and return a simple proxy to that wrapper.
https://github.com/jaraco/jaraco.functools/issues/5
"""
name = method.__name__
special_names = '__getattr__', '__getitem__'
if name not in special_names:
return
wrapper_name = '__cached' + name
def proxy(self, *args, **kwargs):
if wrapper_name not in vars(self):
bound = types.MethodType(method, self)
cache = cache_wrapper(bound)
setattr(self, wrapper_name, cache)
else:
cache = getattr(self, wrapper_name)
return cache(*args, **kwargs)
return proxy
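# Illustrative sketch (editor's addition, not upstream jaraco code): the
# proxy above is what lets method_cache work on dunder methods, since Python
# looks those up on the type rather than the instance.
def _demo_special_method_cache():
    class Squares:
        calls = 0
        @method_cache
        def __getitem__(self, n):
            self.calls += 1
            return n * n
    s = Squares()
    assert s[3] == 9 and s[3] == 9
    assert s.calls == 1  # second lookup is served by the per-instance cache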
def apply(transform):
"""
Decorate a function with a transform function that is
invoked on results returned from the decorated function.
>>> @apply(reversed)
... def get_numbers(start):
... "doc for get_numbers"
... return range(start, start+3)
>>> list(get_numbers(4))
[6, 5, 4]
>>> get_numbers.__doc__
'doc for get_numbers'
"""
def wrap(func):
return functools.wraps(func)(compose(transform, func))
return wrap
def result_invoke(action):
r"""
Decorate a function with an action function that is
invoked on the results returned from the decorated
function (for its side-effect), then return the original
result.
>>> @result_invoke(print)
... def add_two(a, b):
... return a + b
>>> x = add_two(2, 3)
5
>>> x
5
"""
def wrap(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
action(result)
return result
return wrapper
return wrap
def call_aside(f, *args, **kwargs):
"""
Call a function for its side effect after initialization.
>>> @call_aside
... def func(): print("called")
called
>>> func()
called
Use functools.partial to pass parameters to the initial call
>>> @functools.partial(call_aside, name='bingo')
... def func(name): print("called with", name)
called with bingo
"""
f(*args, **kwargs)
return f
class Throttler:
"""
Rate-limit a function (or other callable)
"""
def __init__(self, func, max_rate=float('Inf')):
if isinstance(func, Throttler):
func = func.func
self.func = func
self.max_rate = max_rate
self.reset()
def reset(self):
self.last_called = 0
def __call__(self, *args, **kwargs):
self._wait()
return self.func(*args, **kwargs)
def _wait(self):
"ensure at least 1/max_rate seconds from last call"
elapsed = time.time() - self.last_called
must_wait = 1 / self.max_rate - elapsed
time.sleep(max(0, must_wait))
self.last_called = time.time()
def __get__(self, obj, type=None):
return first_invoke(self._wait, functools.partial(self.func, obj))
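# Illustrative sketch (editor's addition, not upstream jaraco code): with
# max_rate=2, successive calls are spaced roughly 1/2 second apart.
def _demo_throttler():
    stamps = []
    throttled = Throttler(lambda: stamps.append(time.time()), max_rate=2)
    throttled()
    throttled()
    assert stamps[1] - stamps[0] >= 0.45  # about 1 / max_rate seconds apart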
def first_invoke(func1, func2):
"""
Return a function that when invoked will invoke func1 without
any parameters (for its side-effect) and then invoke func2
with whatever parameters were passed, returning its result.
"""
def wrapper(*args, **kwargs):
func1()
return func2(*args, **kwargs)
return wrapper
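# Illustrative sketch (editor's addition, not upstream jaraco code): the
# side-effect callable runs before the wrapped function on every call.
def _demo_first_invoke():
    calls = []
    logged_abs = first_invoke(lambda: calls.append('pre'), abs)
    assert logged_abs(-3) == 3 and calls == ['pre']  # side effect ran first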
def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
"""
Given a callable func, trap the indicated exceptions
for up to 'retries' times, invoking cleanup on the
exception. On the final attempt, allow any exceptions
to propagate.
"""
attempts = itertools.count() if retries == float('inf') else range(retries)
for attempt in attempts:
try:
return func()
except trap:
cleanup()
return func()
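# Illustrative sketch (editor's addition, not upstream jaraco code): with
# retries=2 there are up to three invocations in total; only trapped
# exceptions trigger cleanup and another attempt.
def _demo_retry_call():
    attempts = []
    def flaky():
        attempts.append(None)
        if len(attempts) < 3:
            raise ValueError("transient")
        return "ok"
    assert retry_call(flaky, retries=2, trap=ValueError) == "ok"
    assert len(attempts) == 3  # two trapped failures, then success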
def retry(*r_args, **r_kwargs):
"""
Decorator wrapper for retry_call. Accepts arguments to retry_call
except func and then returns a decorator for the decorated function.
Ex:
>>> @retry(retries=3)
... def my_func(a, b):
... "this is my funk"
... print(a, b)
>>> my_func.__doc__
'this is my funk'
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*f_args, **f_kwargs):
bound = functools.partial(func, *f_args, **f_kwargs)
return retry_call(bound, *r_args, **r_kwargs)
return wrapper
return decorate
def print_yielded(func):
"""
Convert a generator into a function that prints all yielded elements
>>> @print_yielded
... def x():
... yield 3; yield None
>>> x()
3
None
"""
print_all = functools.partial(map, print)
print_results = compose(more_itertools.consume, print_all, func)
return functools.wraps(func)(print_results)
def pass_none(func):
"""
Wrap func so it's not called if its first param is None
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return wrapper
def assign_params(func, namespace):
"""
Assign parameters from namespace where func solicits.
>>> def func(x, y=3):
... print(x, y)
>>> assigned = assign_params(func, dict(x=2, z=4))
>>> assigned()
2 3
The usual errors are raised if a function doesn't receive
its required parameters:
>>> assigned = assign_params(func, dict(y=3, z=4))
>>> assigned()
Traceback (most recent call last):
TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal
"""
sig = inspect.signature(func)
params = sig.parameters.keys()
call_ns = {k: namespace[k] for k in params if k in namespace}
return functools.partial(func, **call_ns)
def save_method_args(method):
"""
Wrap a method such that when it is called, the args and kwargs are
saved on the method.
>>> class MyClass:
... @save_method_args
... def method(self, a, b):
... print(a, b)
>>> my_ob = MyClass()
>>> my_ob.method(1, 2)
1 2
>>> my_ob._saved_method.args
(1, 2)
>>> my_ob._saved_method.kwargs
{}
>>> my_ob.method(a=3, b='foo')
3 foo
>>> my_ob._saved_method.args
()
>>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
True
The arguments are stored on the instance, allowing for
different instances to save different args.
>>> your_ob = MyClass()
>>> your_ob.method({str('x'): 3}, b=[4])
{'x': 3} [4]
>>> your_ob._saved_method.args
({'x': 3},)
>>> my_ob._saved_method.args
()
"""
args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
attr_name = '_saved_' + method.__name__
attr = args_and_kwargs(args, kwargs)
setattr(self, attr_name, attr)
return method(self, *args, **kwargs)
return wrapper
def except_(*exceptions, replace=None, use=None):
"""
Replace the indicated exceptions, if raised, with the indicated
literal replacement or evaluated expression (if present).
>>> safe_int = except_(ValueError)(int)
>>> safe_int('five')
>>> safe_int('5')
5
Specify a literal replacement with ``replace``.
>>> safe_int_r = except_(ValueError, replace=0)(int)
>>> safe_int_r('five')
0
Provide an expression to ``use`` to pass through particular parameters.
>>> safe_int_pt = except_(ValueError, use='args[0]')(int)
>>> safe_int_pt('five')
'five'
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions:
try:
return eval(use)
except TypeError:
return replace
return wrapper
return decorate
| 13,494 | Python | .py | 403 | 27.456576 | 88 | 0.621559 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,865 | collections.py | rembo10_headphones/lib/jaraco/collections.py |
import re
import operator
import collections.abc
import itertools
import copy
import functools
import random
from jaraco.classes.properties import NonDataProperty
import jaraco.text
class Projection(collections.abc.Mapping):
"""
Project a set of keys over a mapping
>>> sample = {'a': 1, 'b': 2, 'c': 3}
>>> prj = Projection(['a', 'c', 'd'], sample)
>>> prj == {'a': 1, 'c': 3}
True
Keys should only appear if they were specified and exist in the space.
>>> sorted(list(prj.keys()))
['a', 'c']
Attempting to access a key not in the projection
results in a KeyError.
>>> prj['b']
Traceback (most recent call last):
...
KeyError: 'b'
Use the projection to update another dict.
>>> target = {'a': 2, 'b': 2}
>>> target.update(prj)
>>> target == {'a': 1, 'b': 2, 'c': 3}
True
Also note that Projection keeps a reference to the original dict, so
if you modify the original dict, that could modify the Projection.
>>> del sample['a']
>>> dict(prj)
{'c': 3}
"""
def __init__(self, keys, space):
self._keys = tuple(keys)
self._space = space
def __getitem__(self, key):
if key not in self._keys:
raise KeyError(key)
return self._space[key]
def __iter__(self):
return iter(set(self._keys).intersection(self._space))
def __len__(self):
return len(tuple(iter(self)))
class DictFilter(object):
"""
Takes a dict, and simulates a sub-dict based on the keys.
>>> sample = {'a': 1, 'b': 2, 'c': 3}
>>> filtered = DictFilter(sample, ['a', 'c'])
>>> filtered == {'a': 1, 'c': 3}
True
>>> set(filtered.values()) == {1, 3}
True
>>> set(filtered.items()) == {('a', 1), ('c', 3)}
True
One can also filter by a regular expression pattern
>>> sample['d'] = 4
>>> sample['ef'] = 5
Here we filter for only single-character keys
>>> filtered = DictFilter(sample, include_pattern='.$')
>>> filtered == {'a': 1, 'b': 2, 'c': 3, 'd': 4}
True
>>> filtered['e']
Traceback (most recent call last):
...
KeyError: 'e'
Also note that DictFilter keeps a reference to the original dict, so
if you modify the original dict, that could modify the filtered dict.
>>> del sample['d']
>>> del sample['a']
>>> filtered == {'b': 2, 'c': 3}
True
>>> filtered != {'b': 2, 'c': 3}
False
"""
def __init__(self, dict, include_keys=[], include_pattern=None):
self.dict = dict
self.specified_keys = set(include_keys)
if include_pattern is not None:
self.include_pattern = re.compile(include_pattern)
else:
# for performance, replace the pattern_keys property
self.pattern_keys = set()
def get_pattern_keys(self):
keys = filter(self.include_pattern.match, self.dict.keys())
return set(keys)
pattern_keys = NonDataProperty(get_pattern_keys)
@property
def include_keys(self):
return self.specified_keys.union(self.pattern_keys)
def keys(self):
return self.include_keys.intersection(self.dict.keys())
def values(self):
return map(self.dict.get, self.keys())
def __getitem__(self, i):
if i not in self.include_keys:
raise KeyError(i)
return self.dict[i]
def items(self):
keys = self.keys()
values = map(self.dict.get, keys)
return zip(keys, values)
def __eq__(self, other):
return dict(self) == other
def __ne__(self, other):
return dict(self) != other
def dict_map(function, dictionary):
"""
dict_map is much like the built-in function map. It takes a dictionary
and applies a function to the values of that dictionary, returning a
new dictionary with the mapped values in the original keys.
>>> d = dict_map(lambda x:x+1, dict(a=1, b=2))
>>> d == dict(a=2,b=3)
True
"""
return dict((key, function(value)) for key, value in dictionary.items())
class RangeMap(dict):
"""
A dictionary-like object that uses the keys as bounds for a range.
Inclusion of the value for that range is determined by the
key_match_comparator, which defaults to less-than-or-equal.
A value is returned for a key if it is the first key that matches in
the sorted list of keys.
One may supply keyword parameters to be passed to the sort function used
to sort keys (i.e. ``key``, ``reverse``) as sort_params.
Let's create a map that maps 1-3 -> 'a', 4-6 -> 'b'
>>> r = RangeMap({3: 'a', 6: 'b'}) # boy, that was easy
>>> r[1], r[2], r[3], r[4], r[5], r[6]
('a', 'a', 'a', 'b', 'b', 'b')
Even float values should work so long as the comparison operator
supports it.
>>> r[4.5]
'b'
But you'll notice that the way RangeMap is defined, it must be open-ended
on one side.
>>> r[0]
'a'
>>> r[-1]
'a'
One can close the open-end of the RangeMap by using undefined_value
>>> r = RangeMap({0: RangeMap.undefined_value, 3: 'a', 6: 'b'})
>>> r[0]
Traceback (most recent call last):
...
KeyError: 0
One can get the first or last elements in the range by using RangeMap.Item
>>> last_item = RangeMap.Item(-1)
>>> r[last_item]
'b'
.last_item is a shortcut for Item(-1)
>>> r[RangeMap.last_item]
'b'
Sometimes it's useful to find the bounds for a RangeMap
>>> r.bounds()
(0, 6)
RangeMap supports .get(key, default)
>>> r.get(0, 'not found')
'not found'
>>> r.get(7, 'not found')
'not found'
"""
def __init__(self, source, sort_params={}, key_match_comparator=operator.le):
dict.__init__(self, source)
self.sort_params = sort_params
self.match = key_match_comparator
def __getitem__(self, item):
sorted_keys = sorted(self.keys(), **self.sort_params)
if isinstance(item, RangeMap.Item):
result = self.__getitem__(sorted_keys[item])
else:
key = self._find_first_match_(sorted_keys, item)
result = dict.__getitem__(self, key)
if result is RangeMap.undefined_value:
raise KeyError(key)
return result
def get(self, key, default=None):
"""
Return the value for key if key is in the dictionary, else default.
If default is not given, it defaults to None, so that this method
never raises a KeyError.
"""
try:
return self[key]
except KeyError:
return default
def _find_first_match_(self, keys, item):
is_match = functools.partial(self.match, item)
matches = list(filter(is_match, keys))
if matches:
return matches[0]
raise KeyError(item)
def bounds(self):
sorted_keys = sorted(self.keys(), **self.sort_params)
return (sorted_keys[RangeMap.first_item], sorted_keys[RangeMap.last_item])
# some special values for the RangeMap
undefined_value = type(str('RangeValueUndefined'), (object,), {})()
class Item(int):
"RangeMap Item"
first_item = Item(0)
last_item = Item(-1)
def __identity(x):
return x
def sorted_items(d, key=__identity, reverse=False):
"""
Return the items of the dictionary sorted by the keys
>>> sample = dict(foo=20, bar=42, baz=10)
>>> tuple(sorted_items(sample))
(('bar', 42), ('baz', 10), ('foo', 20))
>>> reverse_string = lambda s: ''.join(reversed(s))
>>> tuple(sorted_items(sample, key=reverse_string))
(('foo', 20), ('bar', 42), ('baz', 10))
>>> tuple(sorted_items(sample, reverse=True))
(('foo', 20), ('baz', 10), ('bar', 42))
"""
# wrap the key func so it operates on the first element of each item
def pairkey_key(item):
return key(item[0])
return sorted(d.items(), key=pairkey_key, reverse=reverse)
class KeyTransformingDict(dict):
"""
A dict subclass that transforms the keys before they're used.
Subclasses may override the default transform_key to customize behavior.
"""
@staticmethod
def transform_key(key): # pragma: nocover
return key
def __init__(self, *args, **kargs):
super(KeyTransformingDict, self).__init__()
# build a dictionary using the default constructs
d = dict(*args, **kargs)
# build this dictionary using transformed keys.
for item in d.items():
self.__setitem__(*item)
def __setitem__(self, key, val):
key = self.transform_key(key)
super(KeyTransformingDict, self).__setitem__(key, val)
def __getitem__(self, key):
key = self.transform_key(key)
return super(KeyTransformingDict, self).__getitem__(key)
def __contains__(self, key):
key = self.transform_key(key)
return super(KeyTransformingDict, self).__contains__(key)
def __delitem__(self, key):
key = self.transform_key(key)
return super(KeyTransformingDict, self).__delitem__(key)
def get(self, key, *args, **kwargs):
key = self.transform_key(key)
return super(KeyTransformingDict, self).get(key, *args, **kwargs)
def setdefault(self, key, *args, **kwargs):
key = self.transform_key(key)
return super(KeyTransformingDict, self).setdefault(key, *args, **kwargs)
def pop(self, key, *args, **kwargs):
key = self.transform_key(key)
return super(KeyTransformingDict, self).pop(key, *args, **kwargs)
def matching_key_for(self, key):
"""
Given a key, return the actual key stored in self that matches.
Raise KeyError if the key isn't found.
"""
try:
return next(e_key for e_key in self.keys() if e_key == key)
except StopIteration:
raise KeyError(key)
class FoldedCaseKeyedDict(KeyTransformingDict):
"""
A case-insensitive dictionary (keys are compared as insensitive
if they are strings).
>>> d = FoldedCaseKeyedDict()
>>> d['heLlo'] = 'world'
>>> list(d.keys()) == ['heLlo']
True
>>> list(d.values()) == ['world']
True
>>> d['hello'] == 'world'
True
>>> 'hello' in d
True
>>> 'HELLO' in d
True
>>> print(repr(FoldedCaseKeyedDict({'heLlo': 'world'})).replace("u'", "'"))
{'heLlo': 'world'}
>>> d = FoldedCaseKeyedDict({'heLlo': 'world'})
>>> print(d['hello'])
world
>>> print(d['Hello'])
world
>>> list(d.keys())
['heLlo']
>>> d = FoldedCaseKeyedDict({'heLlo': 'world', 'Hello': 'world'})
>>> list(d.values())
['world']
>>> key, = d.keys()
>>> key in ['heLlo', 'Hello']
True
>>> del d['HELLO']
>>> d
{}
get should work
>>> d['Sumthin'] = 'else'
>>> d.get('SUMTHIN')
'else'
>>> d.get('OTHER', 'thing')
'thing'
>>> del d['sumthin']
setdefault should also work
>>> d['This'] = 'that'
>>> print(d.setdefault('this', 'other'))
that
>>> len(d)
1
>>> print(d['this'])
that
>>> print(d.setdefault('That', 'other'))
other
>>> print(d['THAT'])
other
Make it pop!
>>> print(d.pop('THAT'))
other
To retrieve the key in its originally-supplied form, use matching_key_for
>>> print(d.matching_key_for('this'))
This
>>> d.matching_key_for('missing')
Traceback (most recent call last):
...
KeyError: 'missing'
"""
@staticmethod
def transform_key(key):
return jaraco.text.FoldedCase(key)
class DictAdapter(object):
"""
Provide a getitem interface for attributes of an object.
Let's say you want to get at the string.lowercase property in a formatted
string. It's easy with DictAdapter.
>>> import string
>>> print("lowercase is %(ascii_lowercase)s" % DictAdapter(string))
lowercase is abcdefghijklmnopqrstuvwxyz
"""
def __init__(self, wrapped_ob):
self.object = wrapped_ob
def __getitem__(self, name):
return getattr(self.object, name)
class ItemsAsAttributes(object):
"""
Mix-in class to enable a mapping object to provide items as
attributes.
>>> C = type(str('C'), (dict, ItemsAsAttributes), dict())
>>> i = C()
>>> i['foo'] = 'bar'
>>> i.foo
'bar'
Natural attribute access takes precedence
>>> i.foo = 'henry'
>>> i.foo
'henry'
But as you might expect, the mapping functionality is preserved.
>>> i['foo']
'bar'
A normal attribute error should be raised if an attribute is
requested that doesn't exist.
>>> i.missing
Traceback (most recent call last):
...
AttributeError: 'C' object has no attribute 'missing'
It also works on dicts that customize __getitem__
>>> missing_func = lambda self, key: 'missing item'
>>> C = type(
... str('C'),
... (dict, ItemsAsAttributes),
... dict(__missing__ = missing_func),
... )
>>> i = C()
>>> i.missing
'missing item'
>>> i.foo
'missing item'
"""
def __getattr__(self, key):
try:
return getattr(super(ItemsAsAttributes, self), key)
except AttributeError as e:
# attempt to get the value from the mapping (return self[key])
# but be careful not to lose the original exception context.
noval = object()
def _safe_getitem(cont, key, missing_result):
try:
return cont[key]
except KeyError:
return missing_result
result = _safe_getitem(self, key, noval)
if result is not noval:
return result
# raise the original exception, but use the original class
# name, not 'super'.
(message,) = e.args
message = message.replace('super', self.__class__.__name__, 1)
e.args = (message,)
raise
def invert_map(map):
"""
Given a dictionary, return another dictionary with keys and values
switched. If any of the values resolve to the same key, raises
a ValueError.
>>> numbers = dict(a=1, b=2, c=3)
>>> letters = invert_map(numbers)
>>> letters[1]
'a'
>>> numbers['d'] = 3
>>> invert_map(numbers)
Traceback (most recent call last):
...
ValueError: Key conflict in inverted mapping
"""
res = dict((v, k) for k, v in map.items())
if not len(res) == len(map):
raise ValueError('Key conflict in inverted mapping')
return res
class IdentityOverrideMap(dict):
"""
A dictionary that by default maps each key to itself, but otherwise
acts like a normal dictionary.
>>> d = IdentityOverrideMap()
>>> d[42]
42
>>> d['speed'] = 'speedo'
>>> print(d['speed'])
speedo
"""
def __missing__(self, key):
return key
class DictStack(list, collections.abc.Mapping):
"""
A stack of dictionaries that behaves as a view on those dictionaries,
giving preference to the last.
>>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)])
>>> stack['a']
2
>>> stack['b']
2
>>> stack['c']
2
>>> len(stack)
3
>>> stack.push(dict(a=3))
>>> stack['a']
3
>>> set(stack.keys()) == set(['a', 'b', 'c'])
True
>>> set(stack.items()) == set([('a', 3), ('b', 2), ('c', 2)])
True
>>> dict(**stack) == dict(stack) == dict(a=3, c=2, b=2)
True
>>> d = stack.pop()
>>> stack['a']
2
>>> d = stack.pop()
>>> stack['a']
1
>>> stack.get('b', None)
>>> 'c' in stack
True
"""
def __iter__(self):
dicts = list.__iter__(self)
return iter(set(itertools.chain.from_iterable(c.keys() for c in dicts)))
def __getitem__(self, key):
for scope in reversed(tuple(list.__iter__(self))):
if key in scope:
return scope[key]
raise KeyError(key)
push = list.append
def __contains__(self, other):
return collections.abc.Mapping.__contains__(self, other)
def __len__(self):
return len(list(iter(self)))
class BijectiveMap(dict):
"""
A Bijective Map (two-way mapping).
Implemented as a simple dictionary of 2x the size, mapping values back
to keys.
Note, this implementation may be incomplete. If there's not a test for
your use case below, it's likely to fail, so please test and send pull
requests or patches for additional functionality needed.
>>> m = BijectiveMap()
>>> m['a'] = 'b'
>>> m == {'a': 'b', 'b': 'a'}
True
>>> print(m['b'])
a
>>> m['c'] = 'd'
>>> len(m)
2
Some weird things happen if you map an item to itself or overwrite a
single key of a pair, so it's disallowed.
>>> m['e'] = 'e'
Traceback (most recent call last):
ValueError: Key cannot map to itself
>>> m['d'] = 'e'
Traceback (most recent call last):
ValueError: Key/Value pairs may not overlap
>>> m['e'] = 'd'
Traceback (most recent call last):
ValueError: Key/Value pairs may not overlap
>>> print(m.pop('d'))
c
>>> 'c' in m
False
>>> m = BijectiveMap(dict(a='b'))
>>> len(m)
1
>>> print(m['b'])
a
>>> m = BijectiveMap()
>>> m.update(a='b')
>>> m['b']
'a'
>>> del m['b']
>>> len(m)
0
>>> 'a' in m
False
"""
def __init__(self, *args, **kwargs):
super(BijectiveMap, self).__init__()
self.update(*args, **kwargs)
def __setitem__(self, item, value):
if item == value:
raise ValueError("Key cannot map to itself")
overlap = (
item in self
and self[item] != value
or value in self
and self[value] != item
)
if overlap:
raise ValueError("Key/Value pairs may not overlap")
super(BijectiveMap, self).__setitem__(item, value)
super(BijectiveMap, self).__setitem__(value, item)
def __delitem__(self, item):
self.pop(item)
def __len__(self):
return super(BijectiveMap, self).__len__() // 2
def pop(self, key, *args, **kwargs):
mirror = self[key]
super(BijectiveMap, self).__delitem__(mirror)
return super(BijectiveMap, self).pop(key, *args, **kwargs)
def update(self, *args, **kwargs):
# build a dictionary using the default constructs
d = dict(*args, **kwargs)
# build this dictionary using transformed keys.
for item in d.items():
self.__setitem__(*item)
class FrozenDict(collections.abc.Mapping, collections.abc.Hashable):
"""
An immutable mapping.
>>> a = FrozenDict(a=1, b=2)
>>> b = FrozenDict(a=1, b=2)
>>> a == b
True
>>> a == dict(a=1, b=2)
True
>>> dict(a=1, b=2) == a
True
>>> 'a' in a
True
>>> type(hash(a)) is type(0)
True
>>> set(iter(a)) == {'a', 'b'}
True
>>> len(a)
2
>>> a['a'] == a.get('a') == 1
True
>>> a['c'] = 3
Traceback (most recent call last):
...
TypeError: 'FrozenDict' object does not support item assignment
>>> a.update(y=3)
Traceback (most recent call last):
...
AttributeError: 'FrozenDict' object has no attribute 'update'
Copies should compare equal
>>> copy.copy(a) == a
True
Copies should be the same type
>>> isinstance(copy.copy(a), FrozenDict)
True
FrozenDict supplies .copy(), even though
collections.abc.Mapping doesn't demand it.
>>> a.copy() == a
True
>>> a.copy() is not a
True
"""
__slots__ = ['__data']
def __new__(cls, *args, **kwargs):
self = super(FrozenDict, cls).__new__(cls)
self.__data = dict(*args, **kwargs)
return self
# Container
def __contains__(self, key):
return key in self.__data
# Hashable
def __hash__(self):
return hash(tuple(sorted(self.__data.items())))
# Mapping
def __iter__(self):
return iter(self.__data)
def __len__(self):
return len(self.__data)
def __getitem__(self, key):
return self.__data[key]
# override get for efficiency provided by dict
def get(self, *args, **kwargs):
return self.__data.get(*args, **kwargs)
# override eq to recognize underlying implementation
def __eq__(self, other):
if isinstance(other, FrozenDict):
other = other.__data
return self.__data.__eq__(other)
def copy(self):
"Return a shallow copy of self"
return copy.copy(self)
class Enumeration(ItemsAsAttributes, BijectiveMap):
"""
A convenient way to provide enumerated values
>>> e = Enumeration('a b c')
>>> e['a']
0
>>> e.a
0
>>> e[1]
'b'
>>> set(e.names) == set('abc')
True
>>> set(e.codes) == set(range(3))
True
>>> e.get('d') is None
True
Codes need not start with 0
>>> e = Enumeration('a b c', range(1, 4))
>>> e['a']
1
>>> e[3]
'c'
"""
def __init__(self, names, codes=None):
if isinstance(names, str):
names = names.split()
if codes is None:
codes = itertools.count()
super(Enumeration, self).__init__(zip(names, codes))
@property
def names(self):
return (key for key in self if isinstance(key, str))
@property
def codes(self):
return (self[name] for name in self.names)
class Everything(object):
"""
A collection "containing" every possible thing.
>>> 'foo' in Everything()
True
>>> import random
>>> random.randint(1, 999) in Everything()
True
>>> random.choice([None, 'foo', 42, ('a', 'b', 'c')]) in Everything()
True
"""
def __contains__(self, other):
return True
class InstrumentedDict(collections.UserDict): # type: ignore # buggy mypy
"""
Instrument an existing dictionary with additional
functionality, but always reference and mutate
the original dictionary.
>>> orig = {'a': 1, 'b': 2}
>>> inst = InstrumentedDict(orig)
>>> inst['a']
1
>>> inst['c'] = 3
>>> orig['c']
3
>>> inst.keys() == orig.keys()
True
"""
def __init__(self, data):
super().__init__()
self.data = data
class Least(object):
"""
A value that is always lesser than any other
>>> least = Least()
>>> 3 < least
False
>>> 3 > least
True
>>> least < 3
True
>>> least <= 3
True
>>> least > 3
False
>>> 'x' > least
True
>>> None > least
True
"""
def __le__(self, other):
return True
__lt__ = __le__
def __ge__(self, other):
return False
__gt__ = __ge__
class Greatest(object):
"""
A value that is always greater than any other
>>> greatest = Greatest()
>>> 3 < greatest
True
>>> 3 > greatest
False
>>> greatest < 3
False
>>> greatest > 3
True
>>> greatest >= 3
True
>>> 'x' > greatest
False
>>> None > greatest
False
"""
def __ge__(self, other):
return True
__gt__ = __ge__
def __le__(self, other):
return False
__lt__ = __le__
def pop_all(items):
"""
Clear items in place and return a copy of items.
>>> items = [1, 2, 3]
>>> popped = pop_all(items)
>>> popped is items
False
>>> popped
[1, 2, 3]
>>> items
[]
"""
result, items[:] = items[:], []
return result
# mypy disabled for pytest-dev/pytest#8332
class FreezableDefaultDict(collections.defaultdict): # type: ignore
"""
Often it is desirable to prevent the mutation of
a default dict after its initial construction, such
as to prevent mutation during iteration.
>>> dd = FreezableDefaultDict(list)
>>> dd[0].append('1')
>>> dd.freeze()
>>> dd[1]
[]
>>> len(dd)
1
"""
def __missing__(self, key):
return getattr(self, '_frozen', super().__missing__)(key)
def freeze(self):
self._frozen = lambda key: self.default_factory()
class Accumulator:
def __init__(self, initial=0):
self.val = initial
def __call__(self, val):
self.val += val
return self.val
class WeightedLookup(RangeMap):
"""
Given parameters suitable for a dict representing keys
and a weighted proportion, return a RangeMap representing
spans of values proportional to the weights:
>>> even = WeightedLookup(a=1, b=1)
[0, 1) -> a
[1, 2) -> b
>>> lk = WeightedLookup(a=1, b=2)
[0, 1) -> a
[1, 3) -> b
>>> lk[.5]
'a'
>>> lk[1.5]
'b'
Adds ``.random()`` to select a random weighted value:
>>> lk.random() in ['a', 'b']
True
>>> choices = [lk.random() for x in range(1000)]
Statistically speaking, choices should be .5 a:b
>>> ratio = choices.count('a') / choices.count('b')
>>> .4 < ratio < .6
True
"""
def __init__(self, *args, **kwargs):
raw = dict(*args, **kwargs)
# allocate keys by weight
indexes = map(Accumulator(), raw.values())
super().__init__(zip(indexes, raw.keys()), key_match_comparator=operator.lt)
def random(self):
lower, upper = self.bounds()
selector = random.random() * upper
return self[selector]
| 25,589 | Python | .py | 814 | 25.136364 | 84 | 0.580239 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,866 | meta.py | rembo10_headphones/lib/jaraco/classes/meta.py |
"""
meta.py
Some useful metaclasses.
"""
class LeafClassesMeta(type):
"""
A metaclass for classes that keeps track of all of them that
aren't base classes.
>>> Parent = LeafClassesMeta('MyParentClass', (), {})
>>> Parent in Parent._leaf_classes
True
>>> Child = LeafClassesMeta('MyChildClass', (Parent,), {})
>>> Child in Parent._leaf_classes
True
>>> Parent in Parent._leaf_classes
False
>>> Other = LeafClassesMeta('OtherClass', (), {})
>>> Parent in Other._leaf_classes
False
>>> len(Other._leaf_classes)
1
"""
def __init__(cls, name, bases, attrs):
if not hasattr(cls, '_leaf_classes'):
cls._leaf_classes = set()
leaf_classes = getattr(cls, '_leaf_classes')
leaf_classes.add(cls)
# remove any base classes
leaf_classes -= set(bases)
class TagRegistered(type):
"""
As classes of this metaclass are created, they keep a registry in the
base class of all classes by a class attribute, indicated by attr_name.
>>> FooObject = TagRegistered('FooObject', (), dict(tag='foo'))
>>> FooObject._registry['foo'] is FooObject
True
>>> BarObject = TagRegistered('Barobject', (FooObject,), dict(tag='bar'))
>>> FooObject._registry is BarObject._registry
True
>>> len(FooObject._registry)
2
'...' below should be 'jaraco.classes' but for pytest-dev/pytest#3396
>>> FooObject._registry['bar']
<class '....meta.Barobject'>
"""
attr_name = 'tag'
def __init__(cls, name, bases, namespace):
super(TagRegistered, cls).__init__(name, bases, namespace)
if not hasattr(cls, '_registry'):
cls._registry = {}
meta = cls.__class__
attr = getattr(cls, meta.attr_name, None)
if attr:
cls._registry[attr] = cls
| 1,853 | Python | .py | 54 | 28.351852 | 77 | 0.616676 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,867 | properties.py | rembo10_headphones/lib/jaraco/classes/properties.py |
class NonDataProperty:
"""Much like the property builtin, but only implements __get__,
making it a non-data property, and can be subsequently reset.
See http://users.rcn.com/python/download/Descriptor.htm for more
information.
>>> class X(object):
... @NonDataProperty
... def foo(self):
... return 3
>>> x = X()
>>> x.foo
3
>>> x.foo = 4
>>> x.foo
4
'...' below should be 'jaraco.classes' but for pytest-dev/pytest#3396
>>> X.foo
<....properties.NonDataProperty object at ...>
"""
def __init__(self, fget):
assert fget is not None, "fget cannot be none"
assert callable(fget), "fget must be callable"
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
class classproperty:
"""
Like @property but applies at the class level.
>>> class X(metaclass=classproperty.Meta):
... val = None
... @classproperty
... def foo(cls):
... return cls.val
... @foo.setter
... def foo(cls, val):
... cls.val = val
>>> X.foo
>>> X.foo = 3
>>> X.foo
3
>>> x = X()
>>> x.foo
3
>>> X.foo = 4
>>> x.foo
4
Setting the property on an instance affects the class.
>>> x.foo = 5
>>> x.foo
5
>>> X.foo
5
>>> vars(x)
{}
>>> X().foo
5
Attempting to set an attribute where no setter was defined
results in an AttributeError:
>>> class GetOnly(metaclass=classproperty.Meta):
... @classproperty
... def foo(cls):
... return 'bar'
>>> GetOnly.foo = 3
Traceback (most recent call last):
...
AttributeError: can't set attribute
It is also possible to wrap a classmethod or staticmethod in
a classproperty.
>>> class Static(metaclass=classproperty.Meta):
... @classproperty
... @classmethod
... def foo(cls):
... return 'foo'
... @classproperty
... @staticmethod
... def bar():
... return 'bar'
>>> Static.foo
'foo'
>>> Static.bar
'bar'
*Legacy*
For compatibility, if the metaclass isn't specified, the
legacy behavior will be invoked.
>>> class X:
... val = None
... @classproperty
... def foo(cls):
... return cls.val
... @foo.setter
... def foo(cls, val):
... cls.val = val
>>> X.foo
>>> X.foo = 3
>>> X.foo
3
>>> x = X()
>>> x.foo
3
>>> X.foo = 4
>>> x.foo
4
Note, because the metaclass was not specified, setting
a value on an instance does not have the intended effect.
>>> x.foo = 5
>>> x.foo
5
>>> X.foo # should be 5
4
>>> vars(x) # should be empty
{'foo': 5}
>>> X().foo # should be 5
4
"""
class Meta(type):
def __setattr__(self, key, value):
obj = self.__dict__.get(key, None)
if type(obj) is classproperty:
return obj.__set__(self, value)
return super().__setattr__(key, value)
def __init__(self, fget, fset=None):
self.fget = self._fix_function(fget)
self.fset = fset
fset and self.setter(fset)
def __get__(self, instance, owner=None):
return self.fget.__get__(None, owner)()
def __set__(self, owner, value):
if not self.fset:
raise AttributeError("can't set attribute")
if type(owner) is not classproperty.Meta:
owner = type(owner)
return self.fset.__get__(None, owner)(value)
def setter(self, fset):
self.fset = self._fix_function(fset)
return self
@classmethod
def _fix_function(cls, fn):
"""
Ensure fn is a classmethod or staticmethod.
"""
if not isinstance(fn, (classmethod, staticmethod)):
return classmethod(fn)
return fn
| 3,980 | Python | .py | 145 | 21.303448 | 73 | 0.550801 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,868 | ancestry.py | rembo10_headphones/lib/jaraco/classes/ancestry.py |
"""
Routines for obtaining the class names
of an object and its parent classes.
"""
from more_itertools import unique_everseen
def all_bases(c):
"""
Return a list of all base classes the class c has as parents.
>>> object in all_bases(list)
True
"""
return c.mro()[1:]
def all_classes(c):
"""
Return a list of all classes to which c belongs.
>>> list in all_classes(list)
True
"""
return c.mro()
# borrowed from
# http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/
def iter_subclasses(cls):
"""
Generator over all subclasses of a given class, in depth-first order.
>>> bool in list(iter_subclasses(int))
True
>>> class A(object): pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in iter_subclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> res = [cls.__name__ for cls in iter_subclasses(object)]
>>> 'type' in res
True
>>> 'tuple' in res
True
>>> len(res) > 100
True
"""
return unique_everseen(_iter_all_subclasses(cls))
def _iter_all_subclasses(cls):
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
yield sub
yield from iter_subclasses(sub)
| 1,464 | Python | .py | 56 | 21.428571 | 82 | 0.61533 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,869 | __init__.py | rembo10_headphones/lib/jaraco/text/__init__.py |
import re
import itertools
import textwrap
import functools
try:
from importlib.resources import files # type: ignore
except ImportError: # pragma: nocover
from importlib_resources import files # type: ignore
from jaraco.functools import compose, method_cache
def substitution(old, new):
"""
Return a function that will perform a substitution on a string
"""
return lambda s: s.replace(old, new)
def multi_substitution(*substitutions):
"""
Take a sequence of pairs specifying substitutions, and create
a function that performs those substitutions.
>>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
'baz'
"""
substitutions = itertools.starmap(substitution, substitutions)
# compose function applies last function first, so reverse the
# substitutions to get the expected order.
substitutions = reversed(tuple(substitutions))
return compose(*substitutions)
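# Illustrative sketch (editor's addition, not upstream jaraco.text code):
# compose applies its *last* argument first, hence the ``reversed`` above.
def _demo_substitution_order():
    unreversed = compose(
        substitution('foo', 'bar'), substitution('bar', 'baz'))
    assert unreversed('foo') == 'bar'  # 'bar' -> 'baz' ran too early
    assert multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo') == 'baz'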
class FoldedCase(str):
"""
A case insensitive string class; behaves just like str
except compares equal when the only variation is case.
>>> s = FoldedCase('hello world')
>>> s == 'Hello World'
True
>>> 'Hello World' == s
True
>>> s != 'Hello World'
False
>>> s.index('O')
4
>>> s.split('O')
['hell', ' w', 'rld']
>>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
['alpha', 'Beta', 'GAMMA']
Sequence membership is straightforward.
>>> "Hello World" in [s]
True
>>> s in ["Hello World"]
True
You may test for set inclusion, but candidate and elements
must both be folded.
>>> FoldedCase("Hello World") in {s}
True
>>> s in {FoldedCase("Hello World")}
True
String inclusion works as long as the FoldedCase object
is on the right.
>>> "hello" in FoldedCase("Hello World")
True
But not if the FoldedCase object is on the left:
>>> FoldedCase('hello') in 'Hello World'
False
In that case, use ``in_``:
>>> FoldedCase('hello').in_('Hello World')
True
>>> FoldedCase('hello') > FoldedCase('Hello')
False
"""
def __lt__(self, other):
return self.lower() < other.lower()
def __gt__(self, other):
return self.lower() > other.lower()
def __eq__(self, other):
return self.lower() == other.lower()
def __ne__(self, other):
return self.lower() != other.lower()
def __hash__(self):
return hash(self.lower())
def __contains__(self, other):
return super().lower().__contains__(other.lower())
def in_(self, other):
"Does self appear in other?"
return self in FoldedCase(other)
# cache lower since it's likely to be called frequently.
@method_cache
def lower(self):
return super().lower()
def index(self, sub):
return self.lower().index(sub.lower())
def split(self, splitter=' ', maxsplit=0):
pattern = re.compile(re.escape(splitter), re.I)
return pattern.split(self, maxsplit)
def is_decodable(value):
r"""
Return True if the supplied value is decodable (using the default
encoding).
>>> is_decodable(b'\xff')
False
>>> is_decodable(b'\x32')
True
"""
# TODO: This code could be expressed more concisely and directly
# with a jaraco.context.ExceptionTrap, but that adds an unfortunate
# long dependency tree, so for now, use boolean literals.
try:
value.decode()
except UnicodeDecodeError:
return False
return True
def is_binary(value):
r"""
Return True if the value appears to be binary (that is, it's a byte
string and isn't decodable).
>>> is_binary(b'\xff')
True
>>> is_binary('\xff')
False
"""
return isinstance(value, bytes) and not is_decodable(value)
def trim(s):
r"""
Trim something like a docstring to remove the whitespace that
is common due to indentation and formatting.
>>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
'foo = bar\n\tbar = baz'
"""
return textwrap.dedent(s).strip()
def wrap(s):
"""
Wrap lines of text, retaining existing newlines as
paragraph markers.
>>> print(wrap(lorem_ipsum))
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
minim veniam, quis nostrud exercitation ullamco laboris nisi ut
aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
<BLANKLINE>
Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
"""
paragraphs = s.splitlines()
wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
return '\n\n'.join(wrapped)
def unwrap(s):
r"""
Given a multi-line string, return an unwrapped version.
>>> wrapped = wrap(lorem_ipsum)
>>> wrapped.count('\n')
20
>>> unwrapped = unwrap(wrapped)
>>> unwrapped.count('\n')
1
>>> print(unwrapped)
Lorem ipsum dolor sit amet, consectetur adipiscing ...
Curabitur pretium tincidunt lacus. Nulla gravida orci ...
"""
paragraphs = re.split(r'\n\n+', s)
cleaned = (para.replace('\n', ' ') for para in paragraphs)
return '\n'.join(cleaned)
lorem_ipsum: str = files(__name__).joinpath('Lorem ipsum.txt').read_text()
class Splitter(object):
"""object that will split a string with the given arguments for each call
>>> s = Splitter(',')
>>> s('hello, world, this is your, master calling')
['hello', ' world', ' this is your', ' master calling']
"""
def __init__(self, *args):
self.args = args
def __call__(self, s):
return s.split(*self.args)
def indent(string, prefix=' ' * 4):
"""
>>> indent('foo')
' foo'
"""
return prefix + string
class WordSet(tuple):
"""
Given an identifier, return the words that identifier represents,
whether in camel case, underscore-separated, etc.
>>> WordSet.parse("camelCase")
('camel', 'Case')
>>> WordSet.parse("under_sep")
('under', 'sep')
Acronyms should be retained
>>> WordSet.parse("firstSNL")
('first', 'SNL')
>>> WordSet.parse("you_and_I")
('you', 'and', 'I')
>>> WordSet.parse("A simple test")
('A', 'simple', 'test')
Multiple caps should not interfere with the first cap of another word.
>>> WordSet.parse("myABCClass")
('my', 'ABC', 'Class')
The result is a WordSet, so you can get the form you need.
>>> WordSet.parse("myABCClass").underscore_separated()
'my_ABC_Class'
>>> WordSet.parse('a-command').camel_case()
'ACommand'
>>> WordSet.parse('someIdentifier').lowered().space_separated()
'some identifier'
Slices of the result should return another WordSet.
>>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
'out_of_context'
>>> WordSet.from_class_name(WordSet()).lowered().space_separated()
'word set'
>>> example = WordSet.parse('figured it out')
>>> example.headless_camel_case()
'figuredItOut'
>>> example.dash_separated()
'figured-it-out'
"""
_pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
def capitalized(self):
return WordSet(word.capitalize() for word in self)
def lowered(self):
return WordSet(word.lower() for word in self)
def camel_case(self):
return ''.join(self.capitalized())
def headless_camel_case(self):
words = iter(self)
first = next(words).lower()
new_words = itertools.chain((first,), WordSet(words).camel_case())
return ''.join(new_words)
def underscore_separated(self):
return '_'.join(self)
def dash_separated(self):
return '-'.join(self)
def space_separated(self):
return ' '.join(self)
def trim_right(self, item):
"""
Remove the item from the end of the set.
>>> WordSet.parse('foo bar').trim_right('foo')
('foo', 'bar')
>>> WordSet.parse('foo bar').trim_right('bar')
('foo',)
>>> WordSet.parse('').trim_right('bar')
()
"""
return self[:-1] if self and self[-1] == item else self
def trim_left(self, item):
"""
Remove the item from the beginning of the set.
>>> WordSet.parse('foo bar').trim_left('foo')
('bar',)
>>> WordSet.parse('foo bar').trim_left('bar')
('foo', 'bar')
>>> WordSet.parse('').trim_left('bar')
()
"""
return self[1:] if self and self[0] == item else self
def trim(self, item):
"""
>>> WordSet.parse('foo bar').trim('foo')
('bar',)
"""
return self.trim_left(item).trim_right(item)
def __getitem__(self, item):
result = super(WordSet, self).__getitem__(item)
if isinstance(item, slice):
result = WordSet(result)
return result
# for compatibility with Python 2
def __getslice__(self, i, j): # pragma: nocover
return self.__getitem__(slice(i, j))
@classmethod
def parse(cls, identifier):
matches = cls._pattern.finditer(identifier)
return WordSet(match.group(0) for match in matches)
@classmethod
def from_class_name(cls, subject):
return cls.parse(subject.__class__.__name__)
# for backward compatibility
words = WordSet.parse
def simple_html_strip(s):
r"""
Remove HTML from the string `s`.
>>> str(simple_html_strip(''))
''
>>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
A stormy day in paradise
>>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
Somebody tell the truth.
>>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
What about
multiple lines?
"""
html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
texts = (match.group(3) or '' for match in html_stripper.finditer(s))
return ''.join(texts)
class SeparatedValues(str):
"""
A string separated by a separator. Overrides __iter__ for getting
the values.
>>> list(SeparatedValues('a,b,c'))
['a', 'b', 'c']
Whitespace is stripped and empty values are discarded.
>>> list(SeparatedValues(' a, b , c, '))
['a', 'b', 'c']
"""
separator = ','
def __iter__(self):
parts = self.split(self.separator)
return filter(None, (part.strip() for part in parts))
class Stripper:
r"""
Given a series of lines, find the common prefix and strip it from them.
>>> lines = [
... 'abcdefg\n',
... 'abc\n',
... 'abcde\n',
... ]
>>> res = Stripper.strip_prefix(lines)
>>> res.prefix
'abc'
>>> list(res.lines)
['defg\n', '\n', 'de\n']
If no prefix is common, nothing should be stripped.
>>> lines = [
... 'abcd\n',
... '1234\n',
... ]
>>> res = Stripper.strip_prefix(lines)
>>> res.prefix
''
>>> list(res.lines)
['abcd\n', '1234\n']
"""
def __init__(self, prefix, lines):
self.prefix = prefix
self.lines = map(self, lines)
@classmethod
def strip_prefix(cls, lines):
prefix_lines, lines = itertools.tee(lines)
prefix = functools.reduce(cls.common_prefix, prefix_lines)
return cls(prefix, lines)
def __call__(self, line):
if not self.prefix:
return line
null, prefix, rest = line.partition(self.prefix)
return rest
@staticmethod
def common_prefix(s1, s2):
"""
Return the common prefix of two lines.
"""
index = min(len(s1), len(s2))
while s1[:index] != s2[:index]:
index -= 1
return s1[:index]
def remove_prefix(text, prefix):
"""
Remove the prefix from the text if it exists.
>>> remove_prefix('underwhelming performance', 'underwhelming ')
'performance'
>>> remove_prefix('something special', 'sample')
'something special'
"""
null, prefix, rest = text.rpartition(prefix)
return rest
def remove_suffix(text, suffix):
"""
Remove the suffix from the text if it exists.
>>> remove_suffix('name.git', '.git')
'name'
>>> remove_suffix('something special', 'sample')
'something special'
"""
rest, suffix, null = text.partition(suffix)
return rest
def normalize_newlines(text):
r"""
Replace alternate newlines with the canonical newline.
>>> normalize_newlines('Lorem Ipsum\u2029')
'Lorem Ipsum\n'
>>> normalize_newlines('Lorem Ipsum\r\n')
'Lorem Ipsum\n'
>>> normalize_newlines('Lorem Ipsum\x85')
'Lorem Ipsum\n'
"""
newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
pattern = '|'.join(newlines)
return re.sub(pattern, '\n', text)
| 13,972 | Python | .py | 396 | 29.411616 | 77 | 0.627613 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,870 | util.py | rembo10_headphones/lib/musicbrainzngs/util.py |
# This file is part of the musicbrainzngs library
# Copyright (C) Alastair Porter, Adrian Sampson, and others
# This file is distributed under a BSD-2-Clause type license.
# See the COPYING file for more information.
import sys
import locale
import xml.etree.ElementTree as ET
from . import compat
def _unicode(string, encoding=None):
"""Try to decode byte strings to unicode.
This can only be a guess, but this might be better than failing.
It is safe to use this on numbers or strings that are already unicode.
"""
if isinstance(string, compat.unicode):
unicode_string = string
elif isinstance(string, compat.bytes):
# try the given encoding, then stdin's encoding, then the locale's
# preferred encoding, until one is not None
if encoding is None:
encoding = sys.stdin.encoding
if encoding is None:
encoding = locale.getpreferredencoding()
unicode_string = string.decode(encoding, "ignore")
else:
unicode_string = compat.unicode(string)
return unicode_string.replace('\x00', '').strip()
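# Illustrative sketch (editor's addition, not upstream musicbrainzngs code):
# undecodable bytes are dropped ("ignore"), NULs removed, whitespace stripped.
def _demo_unicode():
    assert _unicode(b" caf\xc3\xa9\x00 ", encoding="utf-8") == "caf\xe9"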
def bytes_to_elementtree(bytes_or_file):
"""Given a bytestring or a file-like object that will produce them,
parse and return an ElementTree.
"""
if isinstance(bytes_or_file, compat.basestring):
s = bytes_or_file
else:
s = bytes_or_file.read()
if compat.is_py3:
s = _unicode(s, "utf-8")
f = compat.StringIO(s)
tree = ET.ElementTree(file=f)
return tree
| 1,428 | Python | .py | 38 | 33.289474 | 79 | 0.71604 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,871 | mbxml.py | rembo10_headphones/lib/musicbrainzngs/mbxml.py |
# This file is part of the musicbrainzngs library
# Copyright (C) Alastair Porter, Adrian Sampson, and others
# This file is distributed under a BSD-2-Clause type license.
# See the COPYING file for more information.
import re
import xml.etree.ElementTree as ET
import logging
from . import util
def fixtag(tag, namespaces):
# given a decorated tag (of the form {uri}tag), return prefixed
# tag and namespace declaration, if any
if isinstance(tag, ET.QName):
tag = tag.text
namespace_uri, tag = tag[1:].split("}", 1)
prefix = namespaces.get(namespace_uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
namespaces[namespace_uri] = prefix
if prefix == "xml":
xmlns = None
else:
xmlns = ("xmlns:%s" % prefix, namespace_uri)
else:
xmlns = None
return "%s:%s" % (prefix, tag), xmlns
NS_MAP = {"http://musicbrainz.org/ns/mmd-2.0#": "ws2",
"http://musicbrainz.org/ns/ext#-2.0": "ext"}
_log = logging.getLogger("musicbrainzngs")
def get_error_message(error):
""" Given an error XML message from the webservice containing
<error><text>x</text><text>y</text></error>, return a list
of [x, y]"""
try:
tree = util.bytes_to_elementtree(error)
root = tree.getroot()
errors = []
if root.tag == "error":
for ch in root:
if ch.tag == "text":
errors.append(ch.text)
return errors
except ET.ParseError:
return None
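# Illustrative sketch (editor's addition, not upstream musicbrainzngs code):
# the <text> children of a webservice <error> document come back as a list.
def _demo_get_error_message():
    doc = b"<error><text>first</text><text>second</text></error>"
    assert get_error_message(doc) == ["first", "second"]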
def make_artist_credit(artists):
names = []
for artist in artists:
if isinstance(artist, dict):
if "name" in artist:
names.append(artist.get("name", ""))
else:
names.append(artist.get("artist", {}).get("name", ""))
else:
names.append(artist)
return "".join(names)
def parse_elements(valid_els, inner_els, element):
""" Extract single level subelements from an element.
For example, given the element:
<element>
<subelement>Text</subelement>
</element>
and a list valid_els that contains "subelement",
return a dict {'subelement': 'Text'}
Delegate the parsing of multi-level subelements to another function.
For example, given the element:
<element>
<subelement>
<a>Foo</a><b>Bar</b>
</subelement>
</element>
and a dictionary {'subelement': parse_subelement},
call parse_subelement(<subelement>) and
return a dict {'subelement': <result>}
if parse_subelement returns a tuple of the form
(True, {'subelement-key': <result>})
then merge the second element of the tuple into the
result (which may have a key other than 'subelement' or
more than 1 key)
"""
result = {}
for sub in element:
t = fixtag(sub.tag, NS_MAP)[0]
if ":" in t:
t = t.split(":")[1]
if t in valid_els:
result[t] = sub.text or ""
elif t in inner_els.keys():
inner_result = inner_els[t](sub)
if isinstance(inner_result, tuple) and inner_result[0]:
result.update(inner_result[1])
else:
result[t] = inner_result
# add counts for lists when available
m = re.match(r'([a-z0-9-]+)-list', t)
if m and "count" in sub.attrib:
result["%s-count" % m.group(1)] = int(sub.attrib["count"])
else:
_log.info("in <%s>, uncaught <%s>",
fixtag(element.tag, NS_MAP)[0], t)
return result
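# Illustrative sketch (editor's addition, not upstream musicbrainzngs code):
# plain subelements land as text, delegated ones as nested parser results.
# All tags carry the mmd-2.0 namespace, as real webservice replies do.
def _demo_parse_elements():
    elem = ET.fromstring(
        '<element xmlns="http://musicbrainz.org/ns/mmd-2.0#">'
        '<name>Text</name>'
        '<coordinates><latitude>1</latitude><longitude>2</longitude>'
        '</coordinates></element>')
    parsed = parse_elements(["name"], {"coordinates": parse_coordinates}, elem)
    assert parsed == {"name": "Text",
                      "coordinates": {"latitude": "1", "longitude": "2"}}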
def parse_attributes(attributes, element):
""" Extract attributes from an element.
For example, given the element:
<element type="Group" />
and a list attributes that contains "type",
return a dict {'type': 'Group'}
"""
result = {}
for attr in element.attrib:
if "{" in attr:
a = fixtag(attr, NS_MAP)[0]
else:
a = attr
if a in attributes:
result[a] = element.attrib[attr]
else:
_log.info("in <%s>, uncaught attribute %s", fixtag(element.tag, NS_MAP)[0], attr)
return result
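# Illustrative sketch (editor's addition, not upstream musicbrainzngs code):
# only whitelisted attributes are kept; the rest are logged and dropped.
def _demo_parse_attributes():
    elem = ET.fromstring(
        '<artist xmlns="http://musicbrainz.org/ns/mmd-2.0#"'
        ' type="Group" foo="bar"/>')
    assert parse_attributes(["type"], elem) == {"type": "Group"}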
def parse_message(message):
tree = util.bytes_to_elementtree(message)
root = tree.getroot()
result = {}
valid_elements = {"area": parse_area,
"artist": parse_artist,
"instrument": parse_instrument,
"label": parse_label,
"place": parse_place,
"event": parse_event,
"release": parse_release,
"release-group": parse_release_group,
"series": parse_series,
"recording": parse_recording,
"work": parse_work,
"url": parse_url,
"disc": parse_disc,
"cdstub": parse_cdstub,
"isrc": parse_isrc,
"annotation-list": parse_annotation_list,
"area-list": parse_area_list,
"artist-list": parse_artist_list,
"label-list": parse_label_list,
"place-list": parse_place_list,
"event-list": parse_event_list,
"instrument-list": parse_instrument_list,
"release-list": parse_release_list,
"release-group-list": parse_release_group_list,
"series-list": parse_series_list,
"recording-list": parse_recording_list,
"work-list": parse_work_list,
"url-list": parse_url_list,
"collection-list": parse_collection_list,
"collection": parse_collection,
"message": parse_response_message
}
result.update(parse_elements([], valid_elements, root))
return result
def parse_response_message(message):
return parse_elements(["text"], {}, message)
def parse_collection_list(cl):
return [parse_collection(c) for c in cl]
def parse_collection(collection):
result = {}
attribs = ["id", "type", "entity-type"]
elements = ["name", "editor"]
inner_els = {"release-list": parse_release_list,
"artist-list": parse_artist_list,
"event-list": parse_event_list,
"place-list": parse_place_list,
"recording-list": parse_recording_list,
"work-list": parse_work_list}
result.update(parse_attributes(attribs, collection))
result.update(parse_elements(elements, inner_els, collection))
return result
def parse_annotation_list(al):
return [parse_annotation(a) for a in al]
def parse_annotation(annotation):
result = {}
attribs = ["type", "ext:score"]
elements = ["entity", "name", "text"]
result.update(parse_attributes(attribs, annotation))
result.update(parse_elements(elements, {}, annotation))
return result
def parse_lifespan(lifespan):
parts = parse_elements(["begin", "end", "ended"], {}, lifespan)
return parts
def parse_area_list(al):
return [parse_area(a) for a in al]
def parse_area(area):
result = {}
attribs = ["id", "type", "ext:score"]
elements = ["name", "sort-name", "disambiguation"]
inner_els = {"life-span": parse_lifespan,
"alias-list": parse_alias_list,
"relation-list": parse_relation_list,
"annotation": parse_annotation,
"iso-3166-1-code-list": parse_element_list,
"iso-3166-2-code-list": parse_element_list,
"iso-3166-3-code-list": parse_element_list}
result.update(parse_attributes(attribs, area))
result.update(parse_elements(elements, inner_els, area))
return result
def parse_artist_list(al):
return [parse_artist(a) for a in al]
def parse_artist(artist):
result = {}
attribs = ["id", "type", "ext:score"]
elements = ["name", "sort-name", "country", "user-rating",
"disambiguation", "gender", "ipi"]
inner_els = {"area": parse_area,
"begin-area": parse_area,
"end-area": parse_area,
"life-span": parse_lifespan,
"recording-list": parse_recording_list,
"relation-list": parse_relation_list,
"release-list": parse_release_list,
"release-group-list": parse_release_group_list,
"work-list": parse_work_list,
"tag-list": parse_tag_list,
"user-tag-list": parse_tag_list,
"rating": parse_rating,
"ipi-list": parse_element_list,
"isni-list": parse_element_list,
"alias-list": parse_alias_list,
"annotation": parse_annotation}
result.update(parse_attributes(attribs, artist))
result.update(parse_elements(elements, inner_els, artist))
return result
def parse_coordinates(c):
return parse_elements(['latitude', 'longitude'], {}, c)
def parse_place_list(pl):
return [parse_place(p) for p in pl]
def parse_place(place):
result = {}
attribs = ["id", "type", "ext:score"]
elements = ["name", "address",
"ipi", "disambiguation"]
inner_els = {"area": parse_area,
"coordinates": parse_coordinates,
"life-span": parse_lifespan,
"tag-list": parse_tag_list,
"user-tag-list": parse_tag_list,
"alias-list": parse_alias_list,
"relation-list": parse_relation_list,
"annotation": parse_annotation}
result.update(parse_attributes(attribs, place))
result.update(parse_elements(elements, inner_els, place))
return result
def parse_event_list(el):
return [parse_event(e) for e in el]
def parse_event(event):
result = {}
attribs = ["id", "type", "ext:score"]
elements = ["name", "time", "setlist", "cancelled", "disambiguation", "user-rating"]
inner_els = {"life-span": parse_lifespan,
"relation-list": parse_relation_list,
"alias-list": parse_alias_list,
"tag-list": parse_tag_list,
"user-tag-list": parse_tag_list,
"rating": parse_rating}
result.update(parse_attributes(attribs, event))
result.update(parse_elements(elements, inner_els, event))
return result
def parse_instrument(instrument):
result = {}
attribs = ["id", "type", "ext:score"]
elements = ["name", "description", "disambiguation"]
inner_els = {"relation-list": parse_relation_list,
"tag-list": parse_tag_list,
"alias-list": parse_alias_list,
"annotation": parse_annotation}
result.update(parse_attributes(attribs, instrument))
result.update(parse_elements(elements, inner_els, instrument))
return result
def parse_label_list(ll):
return [parse_label(l) for l in ll]
def parse_label(label):
result = {}
attribs = ["id", "type", "ext:score"]
elements = ["name", "sort-name", "country", "label-code", "user-rating",
"ipi", "disambiguation"]
inner_els = {"area": parse_area,
"life-span": parse_lifespan,
"release-list": parse_release_list,
"tag-list": parse_tag_list,
"user-tag-list": parse_tag_list,
"rating": parse_rating,
"ipi-list": parse_element_list,
"alias-list": parse_alias_list,
"relation-list": parse_relation_list,
"annotation": parse_annotation}
result.update(parse_attributes(attribs, label))
result.update(parse_elements(elements, inner_els, label))
return result
def parse_relation_target(tgt):
attributes = parse_attributes(['id'], tgt)
if 'id' in attributes:
return (True, {'target-id': attributes['id']})
else:
return (True, {'target-id': tgt.text})
def parse_relation_list(rl):
attribs = ["target-type"]
ttype = parse_attributes(attribs, rl)
key = "%s-relation-list" % ttype["target-type"]
return (True, {key: [parse_relation(r) for r in rl]})
def parse_relation(relation):
result = {}
attribs = ["type", "type-id"]
elements = ["target", "direction", "begin", "end", "ended", "ordering-key"]
inner_els = {"area": parse_area,
"artist": parse_artist,
"instrument": parse_instrument,
"label": parse_label,
"place": parse_place,
"event": parse_event,
"recording": parse_recording,
"release": parse_release,
"release-group": parse_release_group,
"series": parse_series,
"attribute-list": parse_element_list,
"work": parse_work,
"target": parse_relation_target
}
result.update(parse_attributes(attribs, relation))
result.update(parse_elements(elements, inner_els, relation))
# We parse attribute-list again to get attributes that have both
# text and attribute values
result.update(parse_elements(['target-credit'], {"attribute-list": parse_relation_attribute_list}, relation))
return result
def parse_relation_attribute_list(attributelist):
ret = []
for attribute in attributelist:
ret.append(parse_relation_attribute_element(attribute))
return (True, {"attributes": ret})
def parse_relation_attribute_element(element):
# Parses an attribute into a dictionary containing an element
# {"attribute": <text value>} and also an additional element
# containing any xml attributes.
# e.g <attribute value="BuxWV 1">number</attribute>
# -> {"attribute": "number", "value": "BuxWV 1"}
result = {}
for attr in element.attrib:
if "{" in attr:
a = fixtag(attr, NS_MAP)[0]
else:
a = attr
result[a] = element.attrib[attr]
result["attribute"] = element.text
return result
def parse_release(release):
result = {}
attribs = ["id", "ext:score"]
elements = ["title", "status", "disambiguation", "quality", "country",
"barcode", "date", "packaging", "asin"]
inner_els = {"text-representation": parse_text_representation,
"artist-credit": parse_artist_credit,
"label-info-list": parse_label_info_list,
"medium-list": parse_medium_list,
"release-group": parse_release_group,
"tag-list": parse_tag_list,
"user-tag-list": parse_tag_list,
"relation-list": parse_relation_list,
"annotation": parse_annotation,
"cover-art-archive": parse_caa,
"release-event-list": parse_release_event_list}
result.update(parse_attributes(attribs, release))
result.update(parse_elements(elements, inner_els, release))
if "artist-credit" in result:
result["artist-credit-phrase"] = make_artist_credit(
result["artist-credit"])
return result
def parse_medium_list(ml):
"""medium-list results from search have an additional
<track-count> element containing the number of tracks
over all mediums. Optionally add this"""
medium_list = []
track_count = None
for m in ml:
tag = fixtag(m.tag, NS_MAP)[0]
if tag == "ws2:medium":
medium_list.append(parse_medium(m))
elif tag == "ws2:track-count":
track_count = int(m.text)
ret = {"medium-list": medium_list}
if track_count is not None:
ret["medium-track-count"] = track_count
return (True, ret)
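# Note on the (True, dict) return convention used by parse_medium_list(),
# parse_relation_list(), parse_relation_attribute_list() and
# parse_relation_target(): judging from the helpers above, the tuple asks
# the element dispatcher to merge the dict's keys directly into the parent
# result rather than nesting them under the element's own key. A minimal
# hypothetical sketch of the effect:
#
#   parse_medium_list(ml)
#   # -> (True, {"medium-list": [...], "medium-track-count": 12})
#   # ...so the enclosing release dict gains both keys at the top level.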
def parse_release_event_list(rel):
return [parse_release_event(re) for re in rel]
def parse_release_event(event):
result = {}
elements = ["date"]
inner_els = {"area": parse_area}
result.update(parse_elements(elements, inner_els, event))
return result
def parse_medium(medium):
result = {}
elements = ["position", "format", "title"]
inner_els = {"disc-list": parse_disc_list,
"pregap": parse_track,
"track-list": parse_track_list,
"data-track-list": parse_track_list}
result.update(parse_elements(elements, inner_els, medium))
return result
def parse_disc_list(dl):
return [parse_disc(d) for d in dl]
def parse_text_representation(textr):
return parse_elements(["language", "script"], {}, textr)
def parse_release_group(rg):
result = {}
attribs = ["id", "type", "ext:score"]
elements = ["title", "user-rating", "first-release-date", "primary-type",
"disambiguation"]
inner_els = {"artist-credit": parse_artist_credit,
"release-list": parse_release_list,
"tag-list": parse_tag_list,
"user-tag-list": parse_tag_list,
"secondary-type-list": parse_element_list,
"relation-list": parse_relation_list,
"rating": parse_rating,
"annotation": parse_annotation}
result.update(parse_attributes(attribs, rg))
result.update(parse_elements(elements, inner_els, rg))
if "artist-credit" in result:
result["artist-credit-phrase"] = make_artist_credit(result["artist-credit"])
return result
def parse_recording(recording):
result = {}
attribs = ["id", "ext:score"]
elements = ["title", "length", "user-rating", "disambiguation", "video"]
inner_els = {"artist-credit": parse_artist_credit,
"release-list": parse_release_list,
"tag-list": parse_tag_list,
"user-tag-list": parse_tag_list,
"rating": parse_rating,
"isrc-list": parse_external_id_list,
"relation-list": parse_relation_list,
"annotation": parse_annotation}
result.update(parse_attributes(attribs, recording))
result.update(parse_elements(elements, inner_els, recording))
if "artist-credit" in result:
result["artist-credit-phrase"] = make_artist_credit(result["artist-credit"])
return result
def parse_series_list(sl):
return [parse_series(s) for s in sl]
def parse_series(series):
result = {}
attribs = ["id", "type", "ext:score"]
elements = ["name", "disambiguation"]
inner_els = {"alias-list": parse_alias_list,
"relation-list": parse_relation_list,
"annotation": parse_annotation}
result.update(parse_attributes(attribs, series))
result.update(parse_elements(elements, inner_els, series))
return result
def parse_external_id_list(pl):
return [parse_attributes(["id"], p)["id"] for p in pl]
def parse_element_list(el):
return [e.text for e in el]
def parse_work_list(wl):
return [parse_work(w) for w in wl]
def parse_work(work):
result = {}
attribs = ["id", "ext:score", "type"]
elements = ["title", "user-rating", "language", "iswc", "disambiguation"]
inner_els = {"tag-list": parse_tag_list,
"user-tag-list": parse_tag_list,
"rating": parse_rating,
"alias-list": parse_alias_list,
"iswc-list": parse_element_list,
"relation-list": parse_relation_list,
"annotation": parse_response_message,
"attribute-list": parse_work_attribute_list
}
result.update(parse_attributes(attribs, work))
result.update(parse_elements(elements, inner_els, work))
return result
def parse_work_attribute_list(wal):
return [parse_work_attribute(wa) for wa in wal]
def parse_work_attribute(wa):
attribs = ["type"]
typeinfo = parse_attributes(attribs, wa)
result = {}
if typeinfo:
result = {"attribute": typeinfo["type"],
"value": wa.text}
return result
def parse_url_list(ul):
return [parse_url(u) for u in ul]
def parse_url(url):
result = {}
attribs = ["id"]
elements = ["resource"]
inner_els = {"relation-list": parse_relation_list}
result.update(parse_attributes(attribs, url))
result.update(parse_elements(elements, inner_els, url))
return result
def parse_disc(disc):
result = {}
attribs = ["id"]
elements = ["sectors"]
inner_els = {"release-list": parse_release_list,
"offset-list": parse_offset_list
}
result.update(parse_attributes(attribs, disc))
result.update(parse_elements(elements, inner_els, disc))
return result
def parse_cdstub(cdstub):
result = {}
attribs = ["id"]
elements = ["title", "artist", "barcode"]
inner_els = {"track-list": parse_track_list}
result.update(parse_attributes(attribs, cdstub))
result.update(parse_elements(elements, inner_els, cdstub))
return result
def parse_offset_list(ol):
return [int(o.text) for o in ol]
def parse_instrument_list(rl):
result = []
for r in rl:
result.append(parse_instrument(r))
return result
def parse_release_list(rl):
result = []
for r in rl:
result.append(parse_release(r))
return result
def parse_release_group_list(rgl):
result = []
for rg in rgl:
result.append(parse_release_group(rg))
return result
def parse_isrc(isrc):
result = {}
attribs = ["id"]
inner_els = {"recording-list": parse_recording_list}
result.update(parse_attributes(attribs, isrc))
result.update(parse_elements([], inner_els, isrc))
return result
def parse_recording_list(recs):
result = []
for r in recs:
result.append(parse_recording(r))
return result
def parse_artist_credit(ac):
result = []
for namecredit in ac:
result.append(parse_name_credit(namecredit))
join = parse_attributes(["joinphrase"], namecredit)
if "joinphrase" in join:
result.append(join["joinphrase"])
return result
def parse_name_credit(nc):
result = {}
elements = ["name"]
inner_els = {"artist": parse_artist}
result.update(parse_elements(elements, inner_els, nc))
return result
def parse_label_info_list(lil):
result = []
for li in lil:
result.append(parse_label_info(li))
return result
def parse_label_info(li):
result = {}
elements = ["catalog-number"]
inner_els = {"label": parse_label}
result.update(parse_elements(elements, inner_els, li))
return result
def parse_track_list(tl):
result = []
for t in tl:
result.append(parse_track(t))
return result
def parse_track(track):
result = {}
attribs = ["id"]
elements = ["number", "position", "title", "length"]
inner_els = {"recording": parse_recording,
"artist-credit": parse_artist_credit}
result.update(parse_attributes(attribs, track))
result.update(parse_elements(elements, inner_els, track))
if "artist-credit" in result.get("recording", {}) and "artist-credit" not in result:
result["artist-credit"] = result["recording"]["artist-credit"]
if "artist-credit" in result:
result["artist-credit-phrase"] = make_artist_credit(result["artist-credit"])
# Make a length field that contains track length or recording length
track_or_recording = None
if "length" in result:
track_or_recording = result["length"]
elif result.get("recording", {}).get("length"):
track_or_recording = result.get("recording", {}).get("length")
if track_or_recording:
result["track_or_recording_length"] = track_or_recording
return result
def parse_tag_list(tl):
return [parse_tag(t) for t in tl]
def parse_tag(tag):
result = {}
attribs = ["count"]
elements = ["name"]
result.update(parse_attributes(attribs, tag))
result.update(parse_elements(elements, {}, tag))
return result
def parse_rating(rating):
result = {}
attribs = ["votes-count"]
result.update(parse_attributes(attribs, rating))
result["rating"] = rating.text
return result
def parse_alias_list(al):
return [parse_alias(a) for a in al]
def parse_alias(alias):
result = {}
attribs = ["locale", "sort-name", "type", "primary",
"begin-date", "end-date"]
result.update(parse_attributes(attribs, alias))
result["alias"] = alias.text
return result
def parse_caa(caa_element):
result = {}
elements = ["artwork", "count", "front", "back", "darkened"]
result.update(parse_elements(elements, {}, caa_element))
return result
###
def make_barcode_request(release2barcode):
NS = "http://musicbrainz.org/ns/mmd-2.0#"
root = ET.Element("{%s}metadata" % NS)
rel_list = ET.SubElement(root, "{%s}release-list" % NS)
for release, barcode in release2barcode.items():
rel_xml = ET.SubElement(rel_list, "{%s}release" % NS)
bar_xml = ET.SubElement(rel_xml, "{%s}barcode" % NS)
rel_xml.set("{%s}id" % NS, release)
bar_xml.text = barcode
return ET.tostring(root, "utf-8")
def make_tag_request(**kwargs):
NS = "http://musicbrainz.org/ns/mmd-2.0#"
root = ET.Element("{%s}metadata" % NS)
for entity_type in ['artist', 'label', 'place', 'recording', 'release', 'release_group', 'work']:
entity_tags = kwargs.pop(entity_type + '_tags', None)
if entity_tags is not None:
e_list = ET.SubElement(root, "{%s}%s-list" % (NS, entity_type.replace('_', '-')))
for e, tags in entity_tags.items():
e_xml = ET.SubElement(e_list, "{%s}%s" % (NS, entity_type.replace('_', '-')))
e_xml.set("{%s}id" % NS, e)
taglist = ET.SubElement(e_xml, "{%s}user-tag-list" % NS)
for tag in tags:
usertag_xml = ET.SubElement(taglist, "{%s}user-tag" % NS)
name_xml = ET.SubElement(usertag_xml, "{%s}name" % NS)
name_xml.text = tag
if kwargs.keys():
raise TypeError("make_tag_request() got an unexpected keyword argument '%s'" % kwargs.popitem()[0])
return ET.tostring(root, "utf-8")
def make_rating_request(**kwargs):
NS = "http://musicbrainz.org/ns/mmd-2.0#"
root = ET.Element("{%s}metadata" % NS)
for entity_type in ['artist', 'label', 'recording', 'release_group', 'work']:
entity_ratings = kwargs.pop(entity_type + '_ratings', None)
if entity_ratings is not None:
e_list = ET.SubElement(root, "{%s}%s-list" % (NS, entity_type.replace('_', '-')))
for e, rating in entity_ratings.items():
e_xml = ET.SubElement(e_list, "{%s}%s" % (NS, entity_type.replace('_', '-')))
e_xml.set("{%s}id" % NS, e)
rating_xml = ET.SubElement(e_xml, "{%s}user-rating" % NS)
rating_xml.text = str(rating)
if kwargs.keys():
raise TypeError("make_rating_request() got an unexpected keyword argument '%s'" % kwargs.popitem()[0])
return ET.tostring(root, "utf-8")
def make_isrc_request(recording2isrcs):
NS = "http://musicbrainz.org/ns/mmd-2.0#"
root = ET.Element("{%s}metadata" % NS)
rec_list = ET.SubElement(root, "{%s}recording-list" % NS)
for rec, isrcs in recording2isrcs.items():
if len(isrcs) > 0:
rec_xml = ET.SubElement(rec_list, "{%s}recording" % NS)
rec_xml.set("{%s}id" % NS, rec)
isrc_list_xml = ET.SubElement(rec_xml, "{%s}isrc-list" % NS)
isrc_list_xml.set("{%s}count" % NS, str(len(isrcs)))
for isrc in isrcs:
isrc_xml = ET.SubElement(isrc_list_xml, "{%s}isrc" % NS)
isrc_xml.set("{%s}id" % NS, isrc)
return ET.tostring(root, "utf-8")
| 28,151 | Python | .py | 680 | 32.386765 | 113 | 0.593217 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,872 | caa.py | rembo10_headphones/lib/musicbrainzngs/caa.py |
# This file is part of the musicbrainzngs library
# Copyright (C) Alastair Porter, Wieland Hoffmann, and others
# This file is distributed under a BSD-2-Clause type license.
# See the COPYING file for more information.
__all__ = [
'set_caa_hostname', 'get_image_list', 'get_release_group_image_list',
'get_release_group_image_front', 'get_image_front', 'get_image_back',
'get_image'
]
import json
from musicbrainzngs import compat
from musicbrainzngs import musicbrainz
from musicbrainzngs.util import _unicode
hostname = "coverartarchive.org"
https = True
def set_caa_hostname(new_hostname, use_https=False):
"""Set the base hostname for Cover Art Archive requests.
Defaults to 'coverartarchive.org', accessing over https.
For backwards compatibility, `use_https` is False by default.
:param str new_hostname: The hostname (and port) of the CAA server to connect to
:param bool use_https: `True` if the host should be accessed using https. Default is `False`
"""
global hostname
global https
hostname = new_hostname
https = use_https
def _caa_request(mbid, imageid=None, size=None, entitytype="release"):
""" Make a CAA request.
:param imageid: ``front``, ``back`` or a number from the listing obtained
with :meth:`get_image_list`.
:type imageid: str
:param size: "250", "500", "1200"
:type size: str or None
:param entitytype: ``release`` or ``release-group``
:type entitytype: str
"""
# Construct the full URL for the request, including hostname and
# query string.
path = [entitytype, mbid]
if imageid and size:
path.append("%s-%s" % (imageid, size))
elif imageid:
path.append(imageid)
url = compat.urlunparse((
'https' if https else 'http',
hostname,
'/%s' % '/'.join(path),
'',
'',
''
))
musicbrainz._log.debug("GET request for %s" % (url, ))
# Set up HTTP request handler and URL opener.
httpHandler = compat.HTTPHandler(debuglevel=0)
handlers = [httpHandler]
opener = compat.build_opener(*handlers)
# Make request.
req = musicbrainz._MusicbrainzHttpRequest("GET", url, None)
# Useragent isn't needed for CAA, but we'll add it if it exists
if musicbrainz._useragent != "":
req.add_header('User-Agent', musicbrainz._useragent)
musicbrainz._log.debug("requesting with UA %s" % musicbrainz._useragent)
resp = musicbrainz._safe_read(opener, req, None)
# TODO: The content type declared by the CAA for JSON files is
# 'application/octet-stream'. This is not useful to detect whether the
# content is JSON, so default to decoding JSON if no imageid was supplied.
# http://tickets.musicbrainz.org/browse/CAA-75
if imageid:
# If we asked for an image, return the image
return resp
else:
# Otherwise it's json
data = _unicode(resp)
return json.loads(data)
def get_image_list(releaseid):
"""Get the list of cover art associated with a release.
The return value is the deserialized response of the `JSON listing
<http://musicbrainz.org/doc/Cover_Art_Archive/API#.2Frelease.2F.7Bmbid.7D.2F>`_
returned by the Cover Art Archive API.
If an error occurs then a :class:`~musicbrainzngs.ResponseError` will
be raised with one of the following HTTP codes:
* 400: `releaseid` is not a valid UUID
* 404: The release with an MBID of `releaseid` does not exist or
there is no cover art available for it.
* 503: Ratelimit exceeded
"""
return _caa_request(releaseid)
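# Usage sketch (hypothetical release MBID; the "images" key belongs to the
# Cover Art Archive JSON schema, not to this module):
#
#   data = get_image_list("76df3287-6cda-33eb-8e9a-044b5e15ffdd")
#   for image in data["images"]:
#       print(image["id"], image.get("front"), image["image"])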
def get_release_group_image_list(releasegroupid):
"""Get the list of cover art associated with a release group.
The return value is the deserialized response of the `JSON listing
<http://musicbrainz.org/doc/Cover_Art_Archive/API#.2Frelease-group.2F.7Bmbid.7D.2F>`_
returned by the Cover Art Archive API.
If an error occurs then a :class:`~musicbrainzngs.ResponseError` will
be raised with one of the following HTTP codes:
* 400: `releasegroupid` is not a valid UUID
* 404: The release group with an MBID of `releasegroupid` does not exist or
there is no cover art available for it.
* 503: Ratelimit exceeded
"""
return _caa_request(releasegroupid, entitytype="release-group")
def get_release_group_image_front(releasegroupid, size=None):
"""Download the front cover art for a release group.
The `size` argument and the possible error conditions are the same as for
:meth:`get_image`.
"""
return get_image(releasegroupid, "front", size=size,
entitytype="release-group")
def get_image_front(releaseid, size=None):
"""Download the front cover art for a release.
The `size` argument and the possible error conditions are the same as for
:meth:`get_image`.
"""
return get_image(releaseid, "front", size=size)
def get_image_back(releaseid, size=None):
"""Download the back cover art for a release.
The `size` argument and the possible error conditions are the same as for
:meth:`get_image`.
"""
return get_image(releaseid, "back", size=size)
def get_image(mbid, coverid, size=None, entitytype="release"):
"""Download cover art for a release. The coverart file to download
is specified by the `coverid` argument.
If `size` is not specified, download the largest copy present, which can be
very large.
If an error occurs then a :class:`~musicbrainzngs.ResponseError`
will be raised with one of the following HTTP codes:
* 400: `releaseid` is not a valid UUID or `coverid` is invalid
* 404: The release with an MBID of `releaseid` does not exist or no cover
art with an id of `coverid` exists.
* 503: Ratelimit exceeded
:param coverid: ``front``, ``back`` or a number from the listing obtained
with :meth:`get_image_list`
:type coverid: int or str
:param size: "250", "500", "1200" or None. If it is None, the largest
available picture will be downloaded. If the image originally
uploaded to the Cover Art Archive was smaller than the
requested size, only the original image will be returned.
:type size: str or None
:param entitytype: The type of entity for which to download the cover art.
This is either ``release`` or ``release-group``.
:type entitytype: str
:return: The binary image data
:type: str
"""
if isinstance(coverid, int):
coverid = "%d" % (coverid, )
if isinstance(size, int):
size = "%d" % (size, )
return _caa_request(mbid, coverid, size=size, entitytype=entitytype)
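# Usage sketch: fetching a 500px front cover and saving it to disk
# (hypothetical MBID; real callers should handle musicbrainzngs.ResponseError):
#
#   data = get_image_front("76df3287-6cda-33eb-8e9a-044b5e15ffdd", size="500")
#   with open("front.jpg", "wb") as f:
#       f.write(data)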
| 6,779 | Python | .py | 149 | 39.442953 | 96 | 0.685081 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,873 | compat.py | rembo10_headphones/lib/musicbrainzngs/compat.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Kenneth Reitz.
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
pythoncompat
"""
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
# ---------
# Specifics
# ---------
if is_py2:
from StringIO import StringIO
from urllib2 import HTTPPasswordMgr, HTTPDigestAuthHandler, Request,\
HTTPHandler, build_opener, HTTPError, URLError
from httplib import BadStatusLine, HTTPException
from urlparse import urlunparse
from urllib import urlencode, quote_plus
bytes = str
unicode = unicode
basestring = basestring
elif is_py3:
from io import StringIO
from urllib.request import HTTPPasswordMgr, HTTPDigestAuthHandler, Request,\
HTTPHandler, build_opener
from urllib.error import HTTPError, URLError
from http.client import HTTPException, BadStatusLine
from urllib.parse import urlunparse, urlencode, quote_plus
unicode = str
bytes = bytes
basestring = (str,bytes)
| 1,719 | Python | .py | 48 | 33.916667 | 77 | 0.767189 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,874 | musicbrainz.py | rembo10_headphones/lib/musicbrainzngs/musicbrainz.py |
# This file is part of the musicbrainzngs library
# Copyright (C) Alastair Porter, Adrian Sampson, and others
# This file is distributed under a BSD-2-Clause type license.
# See the COPYING file for more information.
import base64
import re
import threading
import time
import logging
import socket
import hashlib
import locale
import sys
import json
import xml.etree.ElementTree as etree
from xml.parsers import expat
from warnings import warn
from musicbrainzngs import mbxml
from musicbrainzngs import util
from musicbrainzngs import compat
_version = "0.7.1modified"
_log = logging.getLogger("musicbrainzngs")
LUCENE_SPECIAL = r'([+\-&|!(){}\[\]\^"~*?:\\\/])'
# Constants for validation.
RELATABLE_TYPES = ['area', 'artist', 'label', 'place', 'event', 'recording', 'release', 'release-group', 'series', 'url', 'work', 'instrument']
RELATION_INCLUDES = [entity + '-rels' for entity in RELATABLE_TYPES]
TAG_INCLUDES = ["tags", "user-tags", "genres", "user-genres"]
RATING_INCLUDES = ["ratings", "user-ratings"]
VALID_INCLUDES = {
'area' : ["aliases", "annotation"] + RELATION_INCLUDES + TAG_INCLUDES,
'artist': [
"recordings", "releases", "release-groups", "works", # Subqueries
"various-artists", "discids", "media", "isrcs",
"aliases", "annotation"
] + RELATION_INCLUDES + TAG_INCLUDES + RATING_INCLUDES,
'annotation': [
],
'instrument': ["aliases", "annotation"
] + RELATION_INCLUDES + TAG_INCLUDES,
'label': [
"releases", # Subqueries
"discids", "media",
"aliases", "annotation"
] + RELATION_INCLUDES + TAG_INCLUDES + RATING_INCLUDES,
'place' : ["aliases", "annotation"] + RELATION_INCLUDES + TAG_INCLUDES,
'event' : ["aliases"] + RELATION_INCLUDES + TAG_INCLUDES + RATING_INCLUDES,
'recording': [
"artists", "releases", # Subqueries
"discids", "media", "artist-credits", "isrcs",
"work-level-rels", "annotation", "aliases"
] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES,
'release': [
"artists", "labels", "recordings", "release-groups", "media",
"artist-credits", "discids", "isrcs",
"recording-level-rels", "work-level-rels", "annotation", "aliases"
] + TAG_INCLUDES + RELATION_INCLUDES,
'release-group': [
"artists", "releases", "discids", "media",
"artist-credits", "annotation", "aliases"
] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES,
'series': [
"annotation", "aliases"
] + RELATION_INCLUDES + TAG_INCLUDES,
'work': [
"aliases", "annotation"
] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES,
'url': RELATION_INCLUDES,
'discid': [ # Discid should be the same as release
"artists", "labels", "recordings", "release-groups", "media",
"artist-credits", "discids", "isrcs",
"recording-level-rels", "work-level-rels", "annotation", "aliases"
] + RELATION_INCLUDES,
'isrc': ["artists", "releases", "isrcs"],
'iswc': ["artists"],
'collection': ['releases'],
}
VALID_BROWSE_INCLUDES = {
'artist': ["aliases"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES,
'event': ["aliases"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES,
'label': ["aliases"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES,
'recording': ["artist-credits", "isrcs"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES,
'release': ["artist-credits", "labels", "recordings", "isrcs",
"release-groups", "media", "discids"] + RELATION_INCLUDES,
'place': ["aliases"] + TAG_INCLUDES + RELATION_INCLUDES,
'release-group': ["artist-credits"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES,
'url': RELATION_INCLUDES,
'work': ["aliases", "annotation"] + TAG_INCLUDES + RATING_INCLUDES + RELATION_INCLUDES,
}
#: These can be used to filter whenever releases are included or browsed
VALID_RELEASE_TYPES = [
"nat",
"album", "single", "ep", "broadcast", "other", # primary types
"compilation", "soundtrack", "spokenword", "interview", "audiobook",
"live", "remix", "dj-mix", "mixtape/street", "audio drama", # secondary types
"demo" #headphones
]
#: These can be used to filter whenever releases or release-groups are involved
VALID_RELEASE_STATUSES = ["official", "promotion", "bootleg", "pseudo-release"]
VALID_SEARCH_FIELDS = {
'annotation': [
'entity', 'name', 'text', 'type'
],
'area': [
'aid', 'alias', 'area', 'areaaccent', 'begin', 'comment', 'end',
'ended', 'iso', 'iso1', 'iso2', 'iso3', 'sortname', 'tag', 'type'
],
'artist': [
'alias', 'area', 'arid', 'artist', 'artistaccent', 'begin', 'beginarea',
'comment', 'country', 'end', 'endarea', 'ended', 'gender',
'ipi', 'isni', 'primary_alias', 'sortname', 'tag', 'type'
],
'event': [
'aid', 'alias', 'area', 'arid', 'artist', 'begin', 'comment', 'eid',
'end', 'ended', 'event', 'eventaccent', 'pid', 'place', 'tag', 'type'
],
'instrument': [
'alias', 'comment', 'description', 'iid', 'instrument',
'instrumentaccent', 'tag', 'type'
],
'label': [
'alias', 'area', 'begin', 'code', 'comment', 'country', 'end', 'ended',
'ipi', 'label', 'labelaccent', 'laid', 'release_count', 'sortname',
'tag', 'type'
],
'place': [
'address', 'alias', 'area', 'begin', 'comment', 'end', 'ended', 'lat', 'long',
'pid', 'place', 'placeaccent', 'type'
],
'recording': [
'alias', 'arid', 'artist', 'artistname', 'comment', 'country',
'creditname', 'date', 'dur', 'format', 'isrc', 'number', 'position',
'primarytype', 'qdur', 'recording', 'recordingaccent', 'reid',
'release', 'rgid', 'rid', 'secondarytype', 'status', 'tag', 'tid',
'tnum', 'tracks', 'tracksrelease', 'type', 'video'],
'release-group': [
'alias', 'arid', 'artist', 'artistname', 'comment', 'creditname',
'primarytype', 'reid', 'release', 'releasegroup', 'releasegroupaccent',
'releases', 'rgid', 'secondarytype', 'status', 'tag', 'type'
],
'release': [
'alias', 'arid', 'artist', 'artistname', 'asin', 'barcode', 'catno',
'comment', 'country', 'creditname', 'date', 'discids', 'discidsmedium',
'format', 'label', 'laid', 'lang', 'mediums', 'primarytype', 'quality',
'reid', 'release', 'releaseaccent', 'rgid', 'script', 'secondarytype',
'status', 'tag', 'tracks', 'tracksmedium', 'type'
],
'series': [
'alias', 'comment', 'orderingattribute', 'series', 'seriesaccent',
'sid', 'tag', 'type'
],
'work': [
'alias', 'arid', 'artist', 'comment', 'iswc', 'lang', 'recording',
'recording_count', 'rid', 'tag', 'type', 'wid', 'work', 'workaccent'
]
}
# Constants
class AUTH_YES: pass
class AUTH_NO: pass
class AUTH_IFSET: pass
AUTH_REQUIRED_INCLUDES = ["user-tags", "user-ratings", "user-genres"]
# Exceptions.
class MusicBrainzError(Exception):
"""Base class for all exceptions related to MusicBrainz."""
pass
class UsageError(MusicBrainzError):
"""Error related to misuse of the module API."""
pass
class InvalidSearchFieldError(UsageError):
pass
class InvalidIncludeError(UsageError):
def __init__(self, msg='Invalid Includes', reason=None):
super(InvalidIncludeError, self).__init__(self)
self.msg = msg
self.reason = reason
def __str__(self):
return self.msg
class InvalidFilterError(UsageError):
    def __init__(self, msg='Invalid Filter', reason=None):
super(InvalidFilterError, self).__init__(self)
self.msg = msg
self.reason = reason
def __str__(self):
return self.msg
class WebServiceError(MusicBrainzError):
"""Error related to MusicBrainz API requests."""
def __init__(self, message=None, cause=None):
"""Pass ``cause`` if this exception was caused by another
exception.
"""
self.message = message
self.cause = cause
def __str__(self):
if self.message:
msg = "%s, " % self.message
else:
msg = ""
msg += "caused by: %s" % str(self.cause)
return msg
class NetworkError(WebServiceError):
"""Problem communicating with the MB server."""
pass
class ResponseError(WebServiceError):
"""Bad response sent by the MB server."""
pass
class AuthenticationError(WebServiceError):
"""Received a HTTP 401 response while accessing a protected resource."""
pass
# Helpers for validating and formatting allowed sets.
def _check_includes_impl(includes, valid_includes):
for i in includes:
if i not in valid_includes:
raise InvalidIncludeError("Bad includes: "
"%s is not a valid include" % i)
def _check_includes(entity, inc):
_check_includes_impl(inc, VALID_INCLUDES[entity])
def _check_filter(values, valid):
for v in values:
if v not in valid:
raise InvalidFilterError(v)
def _check_filter_and_make_params(entity, includes, release_status=[], release_type=[]):
"""Check that the status or type values are valid. Then, check that
the filters can be used with the given includes. Return a params
dict that can be passed to _do_mb_query.
"""
if isinstance(release_status, compat.basestring):
release_status = [release_status]
if isinstance(release_type, compat.basestring):
release_type = [release_type]
_check_filter(release_status, VALID_RELEASE_STATUSES)
_check_filter(release_type, VALID_RELEASE_TYPES)
if (release_status
and "releases" not in includes and entity != "release"):
raise InvalidFilterError("Can't have a status with no release include")
if (release_type
and "release-groups" not in includes and "releases" not in includes
and entity not in ["release-group", "release"]):
raise InvalidFilterError("Can't have a release type "
"with no releases or release-groups involved")
# Build parameters.
params = {}
if len(release_status):
params["status"] = "|".join(release_status)
if len(release_type):
params["type"] = "|".join(release_type)
return params
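# A worked example of the filter-to-params mapping above; multiple values
# are joined with "|" as the web service expects:
#
#   _check_filter_and_make_params("release", ["release-groups"],
#                                 release_type=["album", "ep"])
#   # -> {"type": "album|ep"}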
def _docstring_get(entity):
includes = list(VALID_INCLUDES.get(entity, []))
return _docstring_impl("includes", includes)
def _docstring_browse(entity):
includes = list(VALID_BROWSE_INCLUDES.get(entity, []))
return _docstring_impl("includes", includes)
def _docstring_search(entity):
search_fields = list(VALID_SEARCH_FIELDS.get(entity, []))
return _docstring_impl("fields", search_fields)
def _docstring_impl(name, values):
def _decorator(func):
vstr = ", ".join(values)
args = {name: vstr}
if func.__doc__:
func.__doc__ = func.__doc__.format(**args)
return func
return _decorator
# Global authentication and endpoint details.
user = password = ""
hostname = "musicbrainz.org"
https = True
_client = ""
_useragent = ""
mb_auth = False
def auth(u, p):
"""Set the username and password to be used in subsequent queries to
the MusicBrainz XML API that require authentication.
"""
global user, password
user = u
password = p
def hpauth(u, p):
"""Set the username and password to be used in subsequent queries to
the MusicBrainz XML API that require authentication.
"""
global hpuser, hppassword, mb_auth
hpuser = u
hppassword = p
mb_auth = True
def disable_hpauth():
"""Disable the authentication for MusicBrainz XML API
"""
global mb_auth
mb_auth = False
def set_useragent(app, version, contact=None):
"""Set the User-Agent to be used for requests to the MusicBrainz webservice.
This must be set before requests are made."""
global _useragent, _client
if not app or not version:
raise ValueError("App and version can not be empty")
if contact is not None:
_useragent = "%s/%s python-musicbrainzngs/%s ( %s )" % (app, version, _version, contact)
else:
_useragent = "%s/%s python-musicbrainzngs/%s" % (app, version, _version)
_client = "%s-%s" % (app, version)
_log.debug("set user-agent to %s" % _useragent)
def set_hostname(new_hostname, use_https=False):
"""Set the hostname for MusicBrainz webservice requests.
Defaults to 'musicbrainz.org', accessing over https.
For backwards compatibility, `use_https` is False by default.
:param str new_hostname: The hostname (and port) of the MusicBrainz server to connect to
:param bool use_https: `True` if the host should be accessed using https. Default is `False`
Specify a non-standard port by adding it to the hostname,
for example 'localhost:8000'."""
global hostname
global https
hostname = new_hostname
https = use_https
# Rate limiting.
limit_interval = 1.0
limit_requests = 1
do_rate_limit = True
def set_rate_limit(limit_or_interval=1.0, new_requests=1):
"""Sets the rate limiting behavior of the module. Must be invoked
before the first Web service call.
If the `limit_or_interval` parameter is set to False then
rate limiting will be disabled. If it is a number then only
a set number of requests (`new_requests`) will be made per
given interval (`limit_or_interval`).
"""
global limit_interval
global limit_requests
global do_rate_limit
if isinstance(limit_or_interval, bool):
do_rate_limit = limit_or_interval
else:
        if limit_or_interval <= 0.0:
            raise ValueError("limit_or_interval must be greater than 0")
        if new_requests <= 0:
            raise ValueError("new_requests must be greater than 0")
do_rate_limit = True
limit_interval = limit_or_interval
limit_requests = new_requests
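# Usage sketches for the rate-limit settings above:
#
#   set_rate_limit(False)     # disable rate limiting entirely
#   set_rate_limit(2.0, 1)    # at most 1 request per 2 seconds
#   set_rate_limit(1.0, 3)    # at most 3 requests per second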
class _rate_limit(object):
"""A decorator that limits the rate at which the function may be
called. The rate is controlled by the `limit_interval` and
`limit_requests` global variables. The limiting is thread-safe;
only one thread may be in the function at a time (acts like a
monitor in this sense). The globals must be set before the first
call to the limited function.
"""
def __init__(self, fun):
self.fun = fun
self.last_call = 0.0
self.lock = threading.Lock()
self.remaining_requests = None # Set on first invocation.
def _update_remaining(self):
"""Update remaining requests based on the elapsed time since
they were last calculated.
"""
# On first invocation, we have the maximum number of requests
# available.
if self.remaining_requests is None:
self.remaining_requests = float(limit_requests)
else:
since_last_call = time.time() - self.last_call
self.remaining_requests += since_last_call * \
(limit_requests / limit_interval)
self.remaining_requests = min(self.remaining_requests,
float(limit_requests))
self.last_call = time.time()
def __call__(self, *args, **kwargs):
with self.lock:
if do_rate_limit:
self._update_remaining()
# Delay if necessary.
while self.remaining_requests < 0.999:
time.sleep((1.0 - self.remaining_requests) *
(limit_requests / limit_interval))
self._update_remaining()
# Call the original function, "paying" for this call.
self.remaining_requests -= 1.0
return self.fun(*args, **kwargs)
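# The class above is effectively a token bucket: remaining_requests refills
# at limit_requests / limit_interval tokens per second, capped at
# limit_requests. Worked example with the defaults (1 request per 1.0 s):
# after a call drains the bucket to 0.0, a caller arriving 0.4 s later finds
# 0.4 tokens and sleeps roughly (1.0 - 0.4) * 1.0 = 0.6 s before proceeding.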
# From pymb2
class _RedirectPasswordMgr(compat.HTTPPasswordMgr):
def __init__(self):
self._realms = { }
def find_user_password(self, realm, uri):
# ignoring the uri parameter intentionally
try:
return self._realms[realm]
except KeyError:
return (None, None)
def add_password(self, realm, uri, username, password):
# ignoring the uri parameter intentionally
self._realms[realm] = (username, password)
class _DigestAuthHandler(compat.HTTPDigestAuthHandler):
    def get_authorization(self, req, chal):
        qop = chal.get('qop', None)
        if qop and ',' in qop and 'auth' in qop.split(','):
            chal['qop'] = 'auth'
        return compat.HTTPDigestAuthHandler.get_authorization(self, req, chal)
def _encode_utf8(self, msg):
"""The MusicBrainz server also accepts UTF-8 encoded passwords."""
encoding = sys.stdin.encoding or locale.getpreferredencoding()
try:
# This works on Python 2 (msg in bytes)
msg = msg.decode(encoding)
except AttributeError:
# on Python 3 (msg is already in unicode)
pass
return msg.encode("utf-8")
def get_algorithm_impls(self, algorithm):
# algorithm should be case-insensitive according to RFC2617
algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if algorithm == 'MD5':
H = lambda x: hashlib.md5(self._encode_utf8(x)).hexdigest()
elif algorithm == 'SHA':
H = lambda x: hashlib.sha1(self._encode_utf8(x)).hexdigest()
# XXX MD5-sess
KD = lambda s, d: H("%s:%s" % (s, d))
return H, KD
class _MusicbrainzHttpRequest(compat.Request):
""" A custom request handler that allows DELETE and PUT"""
def __init__(self, method, url, data=None):
compat.Request.__init__(self, url, data)
allowed_m = ["GET", "POST", "DELETE", "PUT"]
if method not in allowed_m:
raise ValueError("invalid method: %s" % method)
self.method = method
def get_method(self):
return self.method
# Core (internal) functions for calling the MB API.
def _safe_read(opener, req, body=None, max_retries=3, retry_delay_delta=2.0):
"""Open an HTTP request with a given URL opener and (optionally) a
request body. Transient errors lead to retries. Permanent errors
and repeated errors are translated into a small set of handleable
exceptions. Return a bytestring.
"""
last_exc = None
for retry_num in range(max_retries):
if retry_num: # Not the first try: delay an increasing amount.
_log.info("retrying after delay (#%i)" % retry_num)
time.sleep(retry_num * retry_delay_delta)
try:
if body:
f = opener.open(req, body)
else:
f = opener.open(req)
return f.read()
except compat.HTTPError as exc:
if exc.code in (400, 404, 411):
# Bad request, not found, etc.
raise ResponseError(cause=exc)
elif exc.code in (503, 502, 500):
# Rate limiting, internal overloading...
_log.info("HTTP error %i" % exc.code)
elif exc.code in (401, ):
raise AuthenticationError(cause=exc)
else:
# Other, unknown error. Should handle more cases, but
# retrying for now.
_log.info("unknown HTTP error %i" % exc.code)
last_exc = exc
except compat.BadStatusLine as exc:
_log.info("bad status line")
last_exc = exc
except compat.HTTPException as exc:
_log.info("miscellaneous HTTP exception: %s" % str(exc))
last_exc = exc
except compat.URLError as exc:
if isinstance(exc.reason, socket.error):
code = exc.reason.errno
if code == 104: # "Connection reset by peer."
continue
raise NetworkError(cause=exc)
except socket.timeout as exc:
_log.info("socket timeout")
last_exc = exc
except socket.error as exc:
if exc.errno == 104:
continue
raise NetworkError(cause=exc)
except IOError as exc:
raise NetworkError(cause=exc)
# Out of retries!
raise NetworkError("retried %i times" % max_retries, last_exc)
# Get the XML parsing exceptions to catch. The behavior changed with Python 2.7
# and ElementTree 1.3.
if hasattr(etree, 'ParseError'):
ETREE_EXCEPTIONS = (etree.ParseError, expat.ExpatError)
else:
ETREE_EXCEPTIONS = (expat.ExpatError)
# Parsing setup
def mb_parser_null(resp):
"""Return the raw response (XML)"""
return resp
def mb_parser_xml(resp):
"""Return a Python dict representing the XML response"""
# Parse the response.
try:
return mbxml.parse_message(resp)
except UnicodeError as exc:
raise ResponseError(cause=exc)
except Exception as exc:
if isinstance(exc, ETREE_EXCEPTIONS):
raise ResponseError(cause=exc)
else:
raise
# Defaults
parser_fun = mb_parser_xml
ws_format = "xml"
def set_parser(new_parser_fun=None):
"""Sets the function used to parse the response from the
MusicBrainz web service.
If no parser is given, the parser is reset to the default parser
:func:`mb_parser_xml`.
"""
global parser_fun
if new_parser_fun is None:
new_parser_fun = mb_parser_xml
if not callable(new_parser_fun):
raise ValueError("new_parser_fun must be callable")
parser_fun = new_parser_fun
def set_format(fmt="xml"):
"""Sets the format that should be returned by the Web Service.
The server currently supports `xml` and `json`.
This method will set a default parser for the specified format,
but you can modify it with :func:`set_parser`.
.. warning:: The json format used by the server is different from
the json format returned by the `musicbrainzngs` internal parser
when using the `xml` format! This format may change at any time.
"""
global ws_format
if fmt == "xml":
ws_format = fmt
set_parser() # set to default
elif fmt == "json":
ws_format = fmt
warn("The json format is non-official and may change at any time")
set_parser(json.loads)
else:
raise ValueError("invalid format: %s" % fmt)
@_rate_limit
def _mb_request(path, method='GET', auth_required=AUTH_NO,
client_required=False, args=None, data=None, body=None):
"""Makes a request for the specified `path` (endpoint) on /ws/2 on
the globally-specified hostname. Parses the responses and returns
the resulting object. `auth_required` and `client_required` control
whether exceptions should be raised if the username/password and
client are left unspecified, respectively.
"""
global parser_fun
if args is None:
args = {}
else:
args = dict(args) or {}
if _useragent == "":
raise UsageError("set a proper user-agent with "
"set_useragent(\"application name\", \"application version\", \"contact info (preferably URL or email for your application)\")")
if client_required:
args["client"] = _client
if ws_format != "xml":
args["fmt"] = ws_format
# Convert args from a dictionary to a list of tuples
# so that the ordering of elements is stable for easy
# testing (in this case we order alphabetically)
# Encode Unicode arguments using UTF-8.
newargs = []
for key, value in sorted(args.items()):
if isinstance(value, compat.unicode):
value = value.encode('utf8')
newargs.append((key, value))
# Construct the full URL for the request, including hostname and
# query string.
url = compat.urlunparse((
'https' if https else 'http',
hostname,
'/ws/2/%s' % path,
'',
compat.urlencode(newargs),
''
))
_log.debug("%s request for %s" % (method, url))
# Set up HTTP request handler and URL opener.
httpHandler = compat.HTTPHandler(debuglevel=0)
handlers = [httpHandler]
# Add credentials if required.
add_auth = False
if auth_required == AUTH_YES:
_log.debug("Auth required for %s" % url)
if not user:
raise UsageError("authorization required; "
"use auth(user, pass) first")
add_auth = True
if auth_required == AUTH_IFSET and user:
_log.debug("Using auth for %s because user and pass is set" % url)
add_auth = True
if add_auth:
passwordMgr = _RedirectPasswordMgr()
authHandler = _DigestAuthHandler(passwordMgr)
authHandler.add_password("musicbrainz.org", (), user, password)
handlers.append(authHandler)
opener = compat.build_opener(*handlers)
# Make request.
req = _MusicbrainzHttpRequest(method, url, data)
req.add_header('User-Agent', _useragent)
# Add headphones credentials
if mb_auth:
credentials = f"{hpuser}:{hppassword}"
base64bytes = base64.encodebytes(credentials.encode('utf-8'))
base64string = base64bytes.decode('utf-8').replace('\n', '')
req.add_header("Authorization", f"Basic {base64string}")
_log.debug("requesting with UA %s" % _useragent)
if body:
req.add_header('Content-Type', 'application/xml; charset=UTF-8')
elif not data and not req.has_header('Content-Length'):
# Explicitly indicate zero content length if no request data
# will be sent (avoids HTTP 411 error).
req.add_header('Content-Length', '0')
resp = _safe_read(opener, req, body)
return parser_fun(resp)
def _get_auth_type(entity, id, includes):
""" Some calls require authentication. This returns
a constant (Yes, No, IfSet) for the auth status of the call.
"""
if "user-tags" in includes or "user-ratings" in includes or "user-genres" in includes:
return AUTH_YES
elif entity.startswith("collection"):
if not id:
return AUTH_YES
else:
return AUTH_IFSET
else:
return AUTH_NO
def _do_mb_query(entity, id, includes=[], params={}):
"""Make a single GET call to the MusicBrainz XML API. `entity` is a
string indicated the type of object to be retrieved. The id may be
empty, in which case the query is a search. `includes` is a list
of strings that must be valid includes for the entity type. `params`
is a dictionary of additional parameters for the API call. The
response is parsed and returned.
"""
# Build arguments.
if not isinstance(includes, list):
includes = [includes]
_check_includes(entity, includes)
auth_required = _get_auth_type(entity, id, includes)
args = dict(params)
if len(includes) > 0:
inc = " ".join(includes)
args["inc"] = inc
# Build the endpoint components.
path = '%s/%s' % (entity, id)
return _mb_request(path, 'GET', auth_required, args=args)
def _do_mb_search(entity, query='', fields={},
limit=None, offset=None, strict=False):
"""Perform a full-text search on the MusicBrainz search server.
`query` is a lucene query string when no fields are set,
but is escaped when any fields are given. `fields` is a dictionary
    of key/value query parameters. The keys in `fields` must be valid
for the given entity type.
"""
# Encode the query terms as a Lucene query string.
query_parts = []
if query:
clean_query = util._unicode(query)
if fields:
clean_query = re.sub(LUCENE_SPECIAL, r'\\\1',
clean_query)
if strict:
query_parts.append('"%s"' % clean_query)
else:
query_parts.append(clean_query.lower())
else:
query_parts.append(clean_query)
for key, value in fields.items():
# Ensure this is a valid search field.
if key not in VALID_SEARCH_FIELDS[entity]:
raise InvalidSearchFieldError(
'%s is not a valid search field for %s' % (key, entity)
)
# Escape Lucene's special characters.
value = util._unicode(value)
value = re.sub(LUCENE_SPECIAL, r'\\\1', value)
if value:
if strict:
query_parts.append('%s:"%s"' % (key, value))
else:
value = value.lower() # avoid AND / OR
query_parts.append('%s:(%s)' % (key, value))
if strict:
full_query = ' AND '.join(query_parts).strip()
else:
full_query = ' '.join(query_parts).strip()
if not full_query:
raise ValueError('at least one query term is required')
# Additional parameters to the search.
params = {'query': full_query}
if limit:
params['limit'] = str(limit)
if offset:
params['offset'] = str(offset)
return _do_mb_query(entity, '', [], params)
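# A worked trace of the query construction above (hypothetical search):
# _do_mb_search('artist', fields={'artist': "Guns N' Roses"}) escapes any
# Lucene special characters in the value, lowercases it (to avoid accidental
# AND/OR operators) and sends
#   query=artist:(guns n' roses)
# whereas with strict=True it would instead send
#   query=artist:"Guns N' Roses"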
def _do_mb_delete(path):
"""Send a DELETE request for the specified object.
"""
return _mb_request(path, 'DELETE', AUTH_YES, True)
def _do_mb_put(path):
"""Send a PUT request for the specified object.
"""
return _mb_request(path, 'PUT', AUTH_YES, True)
def _do_mb_post(path, body):
"""Perform a single POST call for an endpoint with a specified
request body.
"""
return _mb_request(path, 'POST', AUTH_YES, True, body=body)
# The main interface!
# Single entity by ID
@_docstring_get("area")
def get_area_by_id(id, includes=[], release_status=[], release_type=[]):
"""Get the area with the MusicBrainz `id` as a dict with an 'area' key.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("area", includes,
release_status, release_type)
return _do_mb_query("area", id, includes, params)
@_docstring_get("artist")
def get_artist_by_id(id, includes=[], release_status=[], release_type=[]):
"""Get the artist with the MusicBrainz `id` as a dict with an 'artist' key.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("artist", includes,
release_status, release_type)
return _do_mb_query("artist", id, includes, params)
@_docstring_get("instrument")
def get_instrument_by_id(id, includes=[], release_status=[], release_type=[]):
"""Get the instrument with the MusicBrainz `id` as a dict with an 'artist' key.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("instrument", includes,
release_status, release_type)
return _do_mb_query("instrument", id, includes, params)
@_docstring_get("label")
def get_label_by_id(id, includes=[], release_status=[], release_type=[]):
"""Get the label with the MusicBrainz `id` as a dict with a 'label' key.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("label", includes,
release_status, release_type)
return _do_mb_query("label", id, includes, params)
@_docstring_get("place")
def get_place_by_id(id, includes=[], release_status=[], release_type=[]):
"""Get the place with the MusicBrainz `id` as a dict with an 'place' key.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("place", includes,
release_status, release_type)
return _do_mb_query("place", id, includes, params)
@_docstring_get("event")
def get_event_by_id(id, includes=[], release_status=[], release_type=[]):
"""Get the event with the MusicBrainz `id` as a dict with an 'event' key.
The event dict has the following keys:
`id`, `type`, `name`, `time`, `disambiguation` and `life-span`.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("event", includes,
release_status, release_type)
return _do_mb_query("event", id, includes, params)
@_docstring_get("recording")
def get_recording_by_id(id, includes=[], release_status=[], release_type=[]):
"""Get the recording with the MusicBrainz `id` as a dict
with a 'recording' key.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("recording", includes,
release_status, release_type)
return _do_mb_query("recording", id, includes, params)
@_docstring_get("release")
def get_release_by_id(id, includes=[], release_status=[], release_type=[]):
"""Get the release with the MusicBrainz `id` as a dict with a 'release' key.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("release", includes,
release_status, release_type)
return _do_mb_query("release", id, includes, params)
@_docstring_get("release-group")
def get_release_group_by_id(id, includes=[],
release_status=[], release_type=[]):
"""Get the release group with the MusicBrainz `id` as a dict
with a 'release-group' key.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("release-group", includes,
release_status, release_type)
return _do_mb_query("release-group", id, includes, params)
@_docstring_get("series")
def get_series_by_id(id, includes=[]):
"""Get the series with the MusicBrainz `id` as a dict with a 'series' key.
*Available includes*: {includes}"""
return _do_mb_query("series", id, includes)
@_docstring_get("work")
def get_work_by_id(id, includes=[]):
"""Get the work with the MusicBrainz `id` as a dict with a 'work' key.
*Available includes*: {includes}"""
return _do_mb_query("work", id, includes)
@_docstring_get("url")
def get_url_by_id(id, includes=[]):
"""Get the url with the MusicBrainz `id` as a dict with a 'url' key.
*Available includes*: {includes}"""
return _do_mb_query("url", id, includes)
# Searching
@_docstring_search("annotation")
def search_annotations(query='', limit=None, offset=None, strict=False, **fields):
"""Search for annotations and return a dict with an 'annotation-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('annotation', query, fields, limit, offset, strict)
@_docstring_search("area")
def search_areas(query='', limit=None, offset=None, strict=False, **fields):
"""Search for areas and return a dict with an 'area-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('area', query, fields, limit, offset, strict)
@_docstring_search("artist")
def search_artists(query='', limit=None, offset=None, strict=False, **fields):
"""Search for artists and return a dict with an 'artist-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('artist', query, fields, limit, offset, strict)
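# Usage sketch: field keyword arguments map onto the Lucene fields listed in
# the docstring; strict=True quotes values instead of lowercasing them:
#
#   result = search_artists(artist="Nirvana", country="US", limit=5)
#   for a in result["artist-list"]:
#       print(a["ext:score"], a["name"])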
@_docstring_search("event")
def search_events(query='', limit=None, offset=None, strict=False, **fields):
"""Search for events and return a dict with an 'event-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('event', query, fields, limit, offset, strict)
@_docstring_search("instrument")
def search_instruments(query='', limit=None, offset=None, strict=False, **fields):
"""Search for instruments and return a dict with a 'instrument-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('instrument', query, fields, limit, offset, strict)
@_docstring_search("label")
def search_labels(query='', limit=None, offset=None, strict=False, **fields):
"""Search for labels and return a dict with a 'label-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('label', query, fields, limit, offset, strict)
@_docstring_search("place")
def search_places(query='', limit=None, offset=None, strict=False, **fields):
"""Search for places and return a dict with a 'place-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('place', query, fields, limit, offset, strict)
@_docstring_search("recording")
def search_recordings(query='', limit=None, offset=None,
strict=False, **fields):
"""Search for recordings and return a dict with a 'recording-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('recording', query, fields, limit, offset, strict)
@_docstring_search("release")
def search_releases(query='', limit=None, offset=None, strict=False, **fields):
"""Search for recordings and return a dict with a 'recording-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('release', query, fields, limit, offset, strict)
@_docstring_search("release-group")
def search_release_groups(query='', limit=None, offset=None,
strict=False, **fields):
"""Search for release groups and return a dict
with a 'release-group-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('release-group', query, fields, limit, offset, strict)
@_docstring_search("series")
def search_series(query='', limit=None, offset=None, strict=False, **fields):
"""Search for series and return a dict with a 'series-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('series', query, fields, limit, offset, strict)
@_docstring_search("work")
def search_works(query='', limit=None, offset=None, strict=False, **fields):
"""Search for works and return a dict with a 'work-list' key.
*Available search fields*: {fields}"""
return _do_mb_search('work', query, fields, limit, offset, strict)
# Lists of entities
@_docstring_get("discid")
def get_releases_by_discid(id, includes=[], toc=None, cdstubs=True, media_format=None):
"""Search for releases with a :musicbrainz:`Disc ID` or table of contents.
When a `toc` is provided and no release with the disc ID is found,
a fuzzy search by the toc is done.
    The `toc` should have the same format as :attr:`discid.Disc.toc_string`.
When a `toc` is provided, the format of the discid itself is not
checked server-side, so any value may be passed if searching by only
`toc` is desired.
If no toc matches in musicbrainz but a :musicbrainz:`CD Stub` does,
the CD Stub will be returned. Prevent this from happening by
passing `cdstubs=False`.
By default only results that match a format that allows discids
(e.g. CD) are included. To include all media formats, pass
`media_format='all'`.
    The result is a dict with either a 'disc', a 'cdstub' or a
    'release-list' key (the latter for a fuzzy match by TOC).
A 'disc' has an 'offset-count', an 'offset-list' and a 'release-list'.
A 'cdstub' key has direct 'artist' and 'title' keys.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("discid", includes, release_status=[],
release_type=[])
if toc:
params["toc"] = toc
if not cdstubs:
params["cdstubs"] = "no"
if media_format:
params["media-format"] = media_format
return _do_mb_query("discid", id, includes, params)
@_docstring_get("recording")
def get_recordings_by_isrc(isrc, includes=[], release_status=[],
release_type=[]):
"""Search for recordings with an :musicbrainz:`ISRC`.
The result is a dict with an 'isrc' key,
which again includes a 'recording-list'.
*Available includes*: {includes}"""
params = _check_filter_and_make_params("isrc", includes,
release_status, release_type)
return _do_mb_query("isrc", isrc, includes, params)
@_docstring_get("work")
def get_works_by_iswc(iswc, includes=[]):
"""Search for works with an :musicbrainz:`ISWC`.
    The result is a dict with a 'work-list' key.
*Available includes*: {includes}"""
return _do_mb_query("iswc", iswc, includes)
def _browse_impl(entity, includes, limit, offset, params, release_status=[], release_type=[]):
includes = includes if isinstance(includes, list) else [includes]
valid_includes = VALID_BROWSE_INCLUDES[entity]
_check_includes_impl(includes, valid_includes)
p = {}
    for k, v in params.items():
if v:
p[k] = v
if len(p) > 1:
raise Exception("Can't have more than one of " + ", ".join(params.keys()))
if limit: p["limit"] = limit
if offset: p["offset"] = offset
filterp = _check_filter_and_make_params(entity, includes, release_status, release_type)
p.update(filterp)
return _do_mb_query(entity, "", includes, p)
# Browse methods
# Browse includes are a subset of the regular get includes, so we check them
# here and the test in _do_mb_query will then pass anyway.
@_docstring_browse("artist")
def browse_artists(recording=None, release=None, release_group=None,
work=None, includes=[], limit=None, offset=None):
"""Get all artists linked to a recording, a release or a release group.
You need to give one MusicBrainz ID.
*Available includes*: {includes}"""
params = {"recording": recording,
"release": release,
"release-group": release_group,
"work": work}
return _browse_impl("artist", includes, limit, offset, params)
@_docstring_browse("event")
def browse_events(area=None, artist=None, place=None,
includes=[], limit=None, offset=None):
"""Get all events linked to a area, a artist or a place.
You need to give one MusicBrainz ID.
*Available includes*: {includes}"""
params = {"area": area,
"artist": artist,
"place": place}
return _browse_impl("event", includes, limit, offset, params)
@_docstring_browse("label")
def browse_labels(release=None, includes=[], limit=None, offset=None):
"""Get all labels linked to a relase. You need to give a MusicBrainz ID.
*Available includes*: {includes}"""
params = {"release": release}
return _browse_impl("label", includes, limit, offset, params)
@_docstring_browse("place")
def browse_places(area=None, includes=[], limit=None, offset=None):
"""Get all places linked to an area. You need to give a MusicBrainz ID.
*Available includes*: {includes}"""
params = {"area": area}
return _browse_impl("place", includes, limit, offset, params)
@_docstring_browse("recording")
def browse_recordings(artist=None, release=None, includes=[],
limit=None, offset=None):
"""Get all recordings linked to an artist or a release.
You need to give one MusicBrainz ID.
*Available includes*: {includes}"""
params = {"artist": artist,
"release": release}
return _browse_impl("recording", includes, limit, offset, params)
@_docstring_browse("release")
def browse_releases(artist=None, track_artist=None, label=None, recording=None,
release_group=None, release_status=[], release_type=[],
includes=[], limit=None, offset=None):
"""Get all releases linked to an artist, a label, a recording
or a release group. You need to give one MusicBrainz ID.
You can also browse by `track_artist`, which gives all releases where some
tracks are attributed to that artist, but not the whole release.
You can filter by :data:`musicbrainz.VALID_RELEASE_TYPES` or
:data:`musicbrainz.VALID_RELEASE_STATUSES`.
*Available includes*: {includes}"""
# track_artist param doesn't work yet
params = {"artist": artist,
"track_artist": track_artist,
"label": label,
"recording": recording,
"release-group": release_group}
return _browse_impl("release", includes, limit, offset,
params, release_status, release_type)
@_docstring_browse("release-group")
def browse_release_groups(artist=None, release=None, release_type=[],
includes=[], limit=None, offset=None):
"""Get all release groups linked to an artist or a release.
You need to give one MusicBrainz ID.
You can filter by :data:`musicbrainz.VALID_RELEASE_TYPES`.
*Available includes*: {includes}"""
params = {"artist": artist,
"release": release}
return _browse_impl("release-group", includes, limit,
offset, params, [], release_type)
@_docstring_browse("url")
def browse_urls(resource=None, includes=[], limit=None, offset=None):
"""Get urls by actual URL string.
You need to give a URL string as 'resource'
*Available includes*: {includes}"""
params = {"resource": resource}
return _browse_impl("url", includes, limit, offset, params)
@_docstring_browse("work")
def browse_works(artist=None, includes=[], limit=None, offset=None):
"""Get all works linked to an artist
*Available includes*: {includes}"""
params = {"artist": artist}
return _browse_impl("work", includes, limit, offset, params)
# Collections
def get_collections():
"""List the collections for the currently :func:`authenticated <auth>` user
as a dict with a 'collection-list' key."""
    # The <release-list count="n"> count is missing from the reply
return _do_mb_query("collection", '')
def _do_collection_query(collection, collection_type, limit, offset):
params = {}
if limit: params["limit"] = limit
if offset: params["offset"] = offset
return _do_mb_query("collection", "%s/%s" % (collection, collection_type), [], params)
def get_artists_in_collection(collection, limit=None, offset=None):
"""List the artists in a collection.
    Returns a dict with a 'collection' key, which again has an 'artist-list'.
See `Browsing`_ for how to use `limit` and `offset`.
"""
return _do_collection_query(collection, "artists", limit, offset)
def get_releases_in_collection(collection, limit=None, offset=None):
"""List the releases in a collection.
Returns a dict with a 'collection' key, which again has a 'release-list'.
See `Browsing`_ for how to use `limit` and `offset`.
"""
return _do_collection_query(collection, "releases", limit, offset)
def get_events_in_collection(collection, limit=None, offset=None):
"""List the events in a collection.
    Returns a dict with a 'collection' key, which again has an 'event-list'.
See `Browsing`_ for how to use `limit` and `offset`.
"""
return _do_collection_query(collection, "events", limit, offset)
def get_places_in_collection(collection, limit=None, offset=None):
"""List the places in a collection.
Returns a dict with a 'collection' key, which again has a 'place-list'.
See `Browsing`_ for how to use `limit` and `offset`.
"""
return _do_collection_query(collection, "places", limit, offset)
def get_recordings_in_collection(collection, limit=None, offset=None):
"""List the recordings in a collection.
Returns a dict with a 'collection' key, which again has a 'recording-list'.
See `Browsing`_ for how to use `limit` and `offset`.
"""
return _do_collection_query(collection, "recordings", limit, offset)
def get_works_in_collection(collection, limit=None, offset=None):
"""List the works in a collection.
Returns a dict with a 'collection' key, which again has a 'work-list'.
See `Browsing`_ for how to use `limit` and `offset`.
"""
return _do_collection_query(collection, "works", limit, offset)
# Submission methods
def submit_barcodes(release_barcode):
"""Submits a set of {release_id1: barcode, ...}"""
query = mbxml.make_barcode_request(release_barcode)
return _do_mb_post("release", query)
def submit_isrcs(recording_isrcs):
"""Submit ISRCs.
Submits a set of {recording-id1: [isrc1, ...], ...}
or {recording_id1: isrc, ...}.
"""
rec2isrcs = dict()
for (rec, isrcs) in recording_isrcs.items():
rec2isrcs[rec] = isrcs if isinstance(isrcs, list) else [isrcs]
query = mbxml.make_isrc_request(rec2isrcs)
return _do_mb_post("recording", query)
def submit_tags(**kwargs):
"""Submit user tags.
Takes parameters named e.g. 'artist_tags', 'recording_tags', etc.,
and of the form:
{entity_id1: [tag1, ...], ...}
If you only have one tag for an entity you can use a string instead
of a list.
The user's tags for each entity will be set to that list, adding or
removing tags as necessary. Submitting an empty list for an entity
will remove all tags for that entity by the user.
"""
for k, v in kwargs.items():
for id, tags in v.items():
kwargs[k][id] = tags if isinstance(tags, list) else [tags]
query = mbxml.make_tag_request(**kwargs)
return _do_mb_post("tag", query)
def submit_ratings(**kwargs):
"""Submit user ratings.
Takes parameters named e.g. 'artist_ratings', 'recording_ratings', etc.,
and of the form:
{entity_id1: rating, ...}
Ratings are numbers from 0-100, at intervals of 20 (20 per 'star').
Submitting a rating of 0 will remove the user's rating.
"""
query = mbxml.make_rating_request(**kwargs)
return _do_mb_post("rating", query)
def add_releases_to_collection(collection, releases=[]):
"""Add releases to a collection.
    Collection and releases should be identified by their MBIDs.
"""
# XXX: Maximum URI length of 16kb means we should only allow ~400 releases
releaselist = ";".join(releases)
return _do_mb_put("collection/%s/releases/%s" % (collection, releaselist))
def remove_releases_from_collection(collection, releases=[]):
"""Remove releases from a collection.
    Collection and releases should be identified by their MBIDs.
"""
releaselist = ";".join(releases)
return _do_mb_delete("collection/%s/releases/%s" % (collection, releaselist))
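# A usage sketch (illustrative, commented out) for collection maintenance;
# requires auth(), and both MBID variables are placeholders.
#
#     auth("username", "password")
#     add_releases_to_collection(collection_mbid, [release_mbid])
#     remove_releases_from_collection(collection_mbid, [release_mbid])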
| 48,910
|
Python
|
.py
| 1,112
| 38.153777
| 153
| 0.660808
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,875
|
__init__.py
|
rembo10_headphones/lib/pkg_resources/__init__.py
|
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
This module is deprecated. Users are directed to :mod:`importlib.resources`,
:mod:`importlib.metadata` and :pypi:`packaging` instead.
"""
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import inspect
import ntpath
import posixpath
import importlib
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
try:
FileExistsError
except NameError:
FileExistsError = OSError
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from pkg_resources.extern.jaraco.text import (
yield_lines,
drop_comment,
join_continuation,
)
from pkg_resources.extern import platformdirs
from pkg_resources.extern import packaging
__import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')
__import__('pkg_resources.extern.packaging.utils')
if sys.version_info < (3, 5):
raise RuntimeError("Python 3.5 or later is required")
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
warnings.warn(
"pkg_resources is deprecated as an API. "
"See https://setuptools.pypa.io/en/latest/pkg_resources.html",
DeprecationWarning,
stacklevel=2,
)
_PEP440_FALLBACK = re.compile(r"^v?(?P<safe>(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*)", re.I)
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
parse_version = packaging.version.Version
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of macOS that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of macOS that we are *running*. To allow usage of packages that
explicitly require a newer version of macOS, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
except ValueError:
# not macOS
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require',
'run_script',
'get_provider',
'get_distribution',
'load_entry_point',
'get_entry_map',
'get_entry_info',
'iter_entry_points',
'resource_string',
'resource_stream',
'resource_filename',
'resource_listdir',
'resource_exists',
'resource_isdir',
# Environmental control
'declare_namespace',
'working_set',
'add_activation_listener',
'find_distributions',
'set_extraction_path',
'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment',
'WorkingSet',
'ResourceManager',
'Distribution',
'Requirement',
'EntryPoint',
# Exceptions
'ResolutionError',
'VersionConflict',
'DistributionNotFound',
'UnknownExtra',
'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements',
'parse_version',
'safe_name',
'safe_version',
'get_platform',
'compatible_platforms',
'yield_lines',
'split_sections',
'safe_extra',
'to_filename',
'invalid_marker',
'evaluate_marker',
# filesystem utilities
'ensure_directory',
'normalize_path',
# Distribution "precedence" constants
'EGG_DIST',
'BINARY_DIST',
'SOURCE_DIST',
'CHECKOUT_DIST',
'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider',
'IResourceProvider',
'FileMetadata',
'PathMetadata',
'EggMetadata',
'EmptyProvider',
'empty_provider',
'NullProvider',
'EggProvider',
'DefaultProvider',
'ZipProvider',
'register_finder',
'register_namespace_handler',
'register_loader_type',
'fixup_namespace_packages',
'get_importer',
# Warnings
'PkgResourcesDeprecationWarning',
# Deprecated/backward compatibility only
'run_main',
'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = (
"The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}"
)
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = '{}.{}'.format(*sys.version_info)
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macos_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macos_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and macOS.
"""
from sysconfig import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macos_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]),
int(version[1]),
_macos_arch(machine),
)
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# macOS special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macOS designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if (
dversion == 7
and macosversion >= "10.3"
or dversion == 8
and macosversion >= "10.4"
):
return True
# egg isn't macOS or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
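# A sketch of what the macOS special-casing above accepts (illustrative,
# commented out): an egg built for an older 10.x release is usable on a newer
# 10.x of the same architecture, but not the other way around.
#
#     compatible_platforms("macosx-10.3-fat", "macosx-10.12-fat")   # True
#     compatible_platforms("macosx-10.12-fat", "macosx-10.3-fat")   # False
#     compatible_platforms(None, "linux-x86_64")                    # True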
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, str):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
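# A usage sketch (illustrative, commented out) for the entry point helpers
# above; "console_scripts" is a standard group, while the "pip" names are
# placeholders for whatever distribution and script are actually installed.
#
#     import pkg_resources
#     for ep in pkg_resources.iter_entry_points("console_scripts"):
#         print(ep.name, "->", ep.module_name)
#     main = pkg_resources.load_entry_point("pip", "console_scripts", "pip")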
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet:
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.normalized_to_canonical_keys = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is None:
canonical_key = self.normalized_to_canonical_keys.get(req.key)
if canonical_key is not None:
req.key = canonical_key
dist = self.by_key.get(canonical_key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
)
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
normalized_name = packaging.utils.canonicalize_name(dist.key)
self.normalized_to_canonical_keys[normalized_name] = dist.key
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(
self,
requirements,
env=None,
installer=None,
replace_conflicting=False,
extras=None,
):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if any requirements are found on the path that have the correct name
        but the wrong version. Otherwise, if an `installer` is supplied it
        will be invoked to obtain the correct version of the requirement and
        activate it.
`extras` is a list of the extras to be used with these requirements.
This is important because extra requirements may look like `my_req;
extra = "my_extra"`, which would otherwise be interpreted as a purely
optional requirement. Instead, we want to be able to assert that these
requirements are truly required.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req, extras):
continue
dist = self._resolve_dist(
req, best, replace_conflicting, env, installer, required_by, to_activate
)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
def _resolve_dist(
self, req, best, replace_conflicting, env, installer, required_by, to_activate
):
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(
req, ws, installer, replace_conflicting=replace_conflicting
)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
return dist
def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
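    # A usage sketch (illustrative, commented out): the module-level
    # require() is bound to this method on the global working set, so a
    # typical call from client code looks like this (the requirement
    # string is a placeholder):
    #
    #     import pkg_resources
    #     dists = pkg_resources.require("setuptools>=40.0")
    #     print([str(d) for d in dists])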
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
        If `existing=True` (default), the callback is also invoked for each
        distribution already in the working set.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:],
self.entry_keys.copy(),
self.by_key.copy(),
self.normalized_to_canonical_keys.copy(),
self.callbacks[:],
)
def __setstate__(self, e_k_b_n_c):
entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy()
self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
class Environment:
"""Searchable snapshot of distributions on a search path"""
def __init__(
self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR
):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.6'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None, replace_conflicting=False):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
try:
dist = working_set.find(req)
except VersionConflict:
if not replace_conflicting:
raise
dist = None
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
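# A usage sketch (illustrative, commented out): snapshot a custom search
# path and pick the best match for a requirement. The path and project
# name are placeholders.
#
#     env = Environment(["/opt/plugins"])
#     req = Requirement.parse("example-plugin>=1.0")
#     dist = env.best_match(req, working_set)
#     if dist is not None:
#         working_set.add(dist)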
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(resource_name)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(resource_name)
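    # A usage sketch (illustrative, commented out): the module-level
    # resource_* functions are bound to a shared ResourceManager later in
    # this module, so client code typically looks like this (package and
    # resource names are placeholders):
    #
    #     import pkg_resources
    #     data = pkg_resources.resource_string("mypkg", "data/config.json")
    #     path = pkg_resources.resource_filename("mypkg", "data/config.json")
    #     if pkg_resources.resource_isdir("mypkg", "data"):
    #         print(pkg_resources.resource_listdir("mypkg", "data"))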
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent(
"""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s)
to the Python egg cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory?
You can change the cache directory by setting the PYTHON_EGG_CACHE
environment variable to point to an accessible directory.
"""
).lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = (
"Extraction path is writable by group/others "
"and vulnerable to attack when "
"used with get_resource_filename ({path}). "
"Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)."
).format(**locals())
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError("Can't change extraction path, files already extracted")
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return os.environ.get('PYTHON_EGG_CACHE') or platformdirs.user_cache_dir(
appname='Python-Eggs'
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def _forgiving_version(version):
"""Fallback when ``safe_version`` is not safe enough
>>> parse_version(_forgiving_version('0.23ubuntu1'))
<Version('0.23.dev0+sanitized.ubuntu1')>
>>> parse_version(_forgiving_version('0.23-'))
<Version('0.23.dev0+sanitized')>
>>> parse_version(_forgiving_version('0.-_'))
<Version('0.dev0+sanitized')>
>>> parse_version(_forgiving_version('42.+?1'))
<Version('42.dev0+sanitized.1')>
>>> parse_version(_forgiving_version('hello world'))
<Version('0.dev0+sanitized.hello.world')>
"""
version = version.replace(' ', '.')
match = _PEP440_FALLBACK.search(version)
if match:
safe = match["safe"]
rest = version[len(safe) :]
else:
safe = "0"
rest = version
local = f"sanitized.{_safe_segment(rest)}".strip(".")
return f"{safe}.dev0+{local}"
def _safe_segment(segment):
"""Convert an arbitrary string into a safe segment"""
segment = re.sub('[^A-Za-z0-9.]+', '-', segment)
segment = re.sub('-[^A-Za-z0-9]+', '-', segment)
return re.sub(r'\.[^A-Za-z0-9]+', '.', segment).strip(".-")
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
if invalid or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e) from e
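# A usage sketch (illustrative, commented out) for the two marker helpers
# above: invalid_marker() returns False for a well-formed marker and the
# SyntaxError instance for a malformed one.
#
#     invalid_marker('python_version >= "3.6"')         # False (marker is valid)
#     err = invalid_marker('python_version >>= "3.6"')  # SyntaxError instance
#     evaluate_marker('python_version >= "3.6"')        # boolean for this interpreter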
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def _get_metadata_path(self, name):
return self._fn(self.egg_info, name)
def has_metadata(self, name):
if not self.egg_info:
return self.egg_info
path = self._get_metadata_path(name)
return self._has(path)
def get_metadata(self, name):
if not self.egg_info:
return ""
path = self._get_metadata_path(name)
value = self._get(path)
try:
return value.decode('utf-8')
except UnicodeDecodeError as exc:
# Include the path in the error message to simplify
# troubleshooting, and without changing the exception type.
exc.reason += ' in {} file at path: {}'.format(name, path)
raise
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError(
"Script {script!r} not found in metadata at {self.egg_info!r}".format(
**locals()
),
)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
with open(script_filename) as fid:
source = fid.read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text),
0,
script_text.split('\n'),
script_filename,
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
self._validate_resource_path(resource_name)
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
@staticmethod
def _validate_resource_path(path):
"""
Validate the resource paths according to the docs.
https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access
>>> warned = getfixture('recwarn')
>>> warnings.simplefilter('always')
>>> vrp = NullProvider._validate_resource_path
>>> vrp('foo/bar.txt')
>>> bool(warned)
False
>>> vrp('../foo/bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('/foo/bar.txt')
>>> bool(warned)
True
>>> vrp('foo/../../bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('foo/f../bar.txt')
>>> bool(warned)
False
Windows path separators are straight-up disallowed.
>>> vrp(r'\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
>>> vrp(r'C:\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
Blank values are allowed
>>> vrp('')
>>> bool(warned)
False
Non-string values are not.
>>> vrp(None)
Traceback (most recent call last):
...
AttributeError: ...
"""
invalid = (
os.path.pardir in path.split(posixpath.sep)
or posixpath.isabs(path)
or ntpath.isabs(path)
)
if not invalid:
return
msg = "Use of .. or absolute path in a resource path is not allowed."
# Aggressively disallow Windows absolute paths
if ntpath.isabs(path) and not posixpath.isabs(path):
raise ValueError(msg)
# for compatibility, warn; in future
# raise ValueError(msg)
issue_warning(
msg[:-1] + " and will raise exceptions in a future release.",
DeprecationWarning,
)
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
def _parents(path):
"""
yield all parents of path including path
"""
last = None
while path != last:
yield path
last = path
path, _ = os.path.split(path)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
super().__init__(module)
self._setup_prefix()
def _setup_prefix(self):
# Assume that metadata may be nested inside a "basket"
# of multiple eggs and use module_path instead of .archive.
eggs = filter(_is_egg_path, _parents(self.module_path))
egg = next(eggs, None)
egg and self._set_egg(egg)
def _set_egg(self, path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_names = (
'SourceFileLoader',
'SourcelessFileLoader',
)
for name in loader_names:
loader_cls = getattr(importlib_machinery, name, type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
module_path = None
_isdir = _has = lambda self, path: False
def _get(self, path):
return ''
def _listdir(self, path):
return []
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with zipfile.ZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
super().__init__(module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ''
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre) :]
raise AssertionError("%s is not a subpath of %s" % (fspath, self.zip_pre))
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1 :].split(os.sep)
raise AssertionError("%s is not a subpath of %s" % (fspath, self.egg_root))
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
# FIXME: 'ZipProvider._extract_resource' is too complex (12)
def _extract_resource(self, manager, zip_path): # noqa: C901
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(manager, os.path.join(zip_path, name))
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise OSError(
'"os.rename" and "os.unlink" are not supported ' 'on this platform'
)
try:
real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path))
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
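    # Illustrative sketch (not part of the original module): for zipinfo keys
    # 'a/b.txt' and 'a/c/d.txt' (with os.sep separators), _index() produces
    #   {'': ['a'], 'a': ['b.txt', 'c'], 'a/c': ['d.txt']}
    # i.e. a parent-directory -> children mapping used by _has/_isdir/_listdir.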
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def _get_metadata_path(self, name):
return self.path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
replacement_char = '�'
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
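# Illustrative usage sketch (not part of the original module); the path below
# is hypothetical, and any sys.path entry works:
#   >>> for dist in find_distributions('/usr/lib/python3/dist-packages'):
#   ...     print(dist.project_name, dist.version)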
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir(''):
if _is_egg_path(subitem):
subpath = os.path.join(path_item, subitem)
dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
yield from dists
elif subitem.lower().endswith(('.dist-info', '.egg-info')):
subpath = os.path.join(path_item, subitem)
submeta = EggMetadata(zipimport.zipimporter(subpath))
submeta.egg_info = subpath
yield Distribution.from_location(path_item, subitem, submeta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item,
metadata=PathMetadata(path_item, os.path.join(path_item, 'EGG-INFO')),
)
return
entries = (os.path.join(path_item, child) for child in safe_listdir(path_item))
# scan for .egg and .egg-info in directory
for entry in sorted(entries):
fullpath = os.path.join(path_item, entry)
factory = dist_factory(path_item, entry, only)
yield from factory(fullpath)
def dist_factory(path_item, entry, only):
"""Return a dist_factory for the given entry."""
lower = entry.lower()
is_egg_info = lower.endswith('.egg-info')
is_dist_info = lower.endswith('.dist-info') and os.path.isdir(
os.path.join(path_item, entry)
)
is_meta = is_egg_info or is_dist_info
return (
distributions_from_metadata
if is_meta
else find_distributions
if not only and _is_egg_path(entry)
else resolve_egg_link
if not only and lower.endswith('.egg-link')
else NoDists()
)
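# Illustrative restatement (not part of the original module) of the chained
# conditional above:
#   'Foo.egg-info' / 'Foo.dist-info' dir -> distributions_from_metadata
#   'Foo.egg'                            -> find_distributions  (unless only)
#   'Foo.egg-link'                       -> resolve_egg_link    (unless only)
#   anything else                        -> NoDists()  (falsy; yields nothing)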
class NoDists:
"""
>>> bool(NoDists())
False
>>> list(NoDists()('anything'))
[]
"""
def __bool__(self):
return False
def __call__(self, fullpath):
return iter(())
def safe_listdir(path):
"""
Attempt to list contents of path, but suppress some exceptions.
"""
try:
return os.listdir(path)
except (PermissionError, NotADirectoryError):
pass
except OSError as e:
        # Ignore the directory if it does not exist, is not a directory, or
        # permission is denied
if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
raise
return ()
def distributions_from_metadata(path):
root = os.path.dirname(path)
if os.path.isdir(path):
if len(os.listdir(path)) == 0:
# empty metadata dir; skip
return
metadata = PathMetadata(root, path)
else:
metadata = FileMetadata(path)
entry = os.path.basename(path)
yield Distribution.from_location(
root,
entry,
metadata,
precedence=DEVELOP_DIST,
)
def non_empty_lines(path):
"""
Yield non-empty lines from file at path
"""
with open(path) as f:
for line in f:
line = line.strip()
if line:
yield line
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref) for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ())
if hasattr(pkgutil, 'ImpImporter'):
register_finder(pkgutil.ImpImporter, find_on_path)
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
# use find_spec (PEP 451) and fall-back to find_module (PEP 302)
try:
spec = importer.find_spec(packageName)
except AttributeError:
# capture warnings due to #1111
with warnings.catch_warnings():
warnings.simplefilter("ignore")
loader = importer.find_module(packageName)
else:
loader = spec.loader if spec else None
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
importlib.import_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
if isinstance(module.__path__, list):
module.__path__[:] = new_path
else:
module.__path__ = new_path
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
msg = (
f"Deprecated call to `pkg_resources.declare_namespace({packageName!r})`.\n"
"Implementing implicit namespace packages (as specified in PEP 420) "
"is preferred to `pkg_resources.declare_namespace`. "
"See https://setuptools.pypa.io/en/latest/references/"
"keywords.html#keyword-namespace-packages"
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path = sys.path
parent, _, _ = packageName.rpartition('.')
if parent:
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError as e:
raise TypeError("Not a package:", parent) from e
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent or None, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
if hasattr(pkgutil, 'ImpImporter'):
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
def _cygwin_patch(filename): # pragma: nocover
"""
    Contrary to POSIX 2008, on Cygwin, getcwd(3) contains
    symlink components. Using os.path.abspath() works around
    this limitation. A fix in os.getcwd() would probably be
    better, in Cygwin even more so, except that this seems
    to be by design...
"""
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return _is_zip_egg(path) or _is_unpacked_egg(path)
def _is_zip_egg(path):
return (
path.lower().endswith('.egg')
and os.path.isfile(path)
and zipfile.is_zipfile(path)
)
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return path.lower().endswith('.egg') and os.path.isfile(
os.path.join(path, 'EGG-INFO', 'PKG-INFO')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
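# Illustrative sketch (not part of the original module): EGG_NAME parses the
# conventional egg filename layout.
#   >>> m = EGG_NAME('FooPkg-1.2-py3.8-linux_x86_64')
#   >>> m.group('name', 'ver', 'pyver', 'plat')
#   ('FooPkg', '1.2', '3.8', 'linux_x86_64')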
class EntryPoint:
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = tuple(extras)
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
PkgResourcesDeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc)) from exc
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
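    # Illustrative sketch (not part of the original module); the entry point
    # string below is hypothetical.
    #   >>> ep = EntryPoint.parse('hp = headphones.cli:main [ssl]')
    #   >>> ep.name, ep.module_name, ep.attrs, ep.extras
    #   ('hp', 'headphones.cli', ('main',), ('ssl',))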
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
def is_version_line(line):
return line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
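# Illustrative sketch (not part of the original module):
#   >>> _version_from_file(['Name: demo', 'Version: 1.2.3'])
#   '1.2.3'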
class Distribution:
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(
self,
location=None,
metadata=None,
project_name=None,
version=None,
py_version=PY_MAJOR,
platform=None,
precedence=EGG_DIST,
):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location,
metadata,
project_name=project_name,
version=version,
py_version=py_version,
platform=platform,
**kw,
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self._forgiving_parsed_version,
self.precedence,
self.key,
self.location,
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
try:
self._parsed_version = parse_version(self.version)
except packaging.version.InvalidVersion as ex:
info = f"(package: {self.project_name})"
if hasattr(ex, "add_note"):
ex.add_note(info) # PEP 678
raise
raise packaging.version.InvalidVersion(f"{str(ex)} {info}") from None
return self._parsed_version
@property
def _forgiving_parsed_version(self):
try:
return self.parsed_version
except packaging.version.InvalidVersion as ex:
self._parsed_version = parse_version(_forgiving_version(self.version))
notes = "\n".join(getattr(ex, "__notes__", [])) # PEP 678
msg = f"""!!\n\n
*************************************************************************
{str(ex)}\n{notes}
This is a long overdue deprecation.
For the time being, `pkg_resources` will use `{self._parsed_version}`
as a replacement to avoid breaking existing environments,
but no future compatibility is guaranteed.
            If you maintain package {self.project_name}, you should implement
            the relevant changes to bring the project into conformance with PEP 440 immediately.
*************************************************************************
\n\n!!
"""
warnings.warn(msg, DeprecationWarning)
return self._parsed_version
@property
def version(self):
try:
return self._version
except AttributeError as e:
version = self._get_version()
if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = ("Missing 'Version:' header and/or {} file at path: {}").format(
self.PKG_INFO, path
)
raise ValueError(msg, self) from e
return version
@property
def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
@staticmethod
def _filter_extras(dm):
"""
Given a mapping of extras to dependencies, strip off
environment markers and filter out any dependencies
not matching the markers.
"""
for extra in list(filter(None, dm)):
new_extra = extra
reqs = dm.pop(extra)
new_extra, _, marker = extra.partition(':')
fails_marker = marker and (
invalid_marker(marker) or not evaluate_marker(marker)
)
if fails_marker:
reqs = []
new_extra = safe_extra(new_extra) or None
dm.setdefault(new_extra, []).extend(reqs)
return dm
def _build_dep_map(self):
dm = {}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError as e:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
) from e
return deps
def _get_metadata_path_for_display(self, name):
"""
Return the path to the given metadata file, if available.
"""
try:
# We need to access _get_metadata_path() on the provider object
# directly rather than through this class's __getattr__()
# since _get_metadata_path() is marked private.
path = self._provider._get_metadata_path(name)
# Handle exceptions e.g. in case the distribution's metadata
# provider doesn't support _get_metadata_path().
except Exception:
return '[could not detect]'
return path
def _get_metadata(self, name):
if self.has_metadata(name):
yield from self.get_metadata_lines(name)
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
version = _version_from_file(lines)
return version
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name),
to_filename(self.version),
self.py_version or PY_MAJOR,
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
def __dir__(self):
return list(
set(super(Distribution, self).__dir__())
| set(attr for attr in self._provider.__dir__() if not attr.startswith('_'))
)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata, **kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
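    # Illustrative sketch (not part of the original module):
    #   >>> d = Distribution(project_name='demo', version='1.0')
    #   >>> str(d.as_requirement())
    #   'demo==1.0'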
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
# FIXME: 'Distribution.insert_on' is too complex (13)
def insert_on(self, path, loc=None, replace=False): # noqa: C901
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
                    # don't modify path (not even to remove duplicates) if the
                    # location was found and replace is False
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
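    # Illustrative sketch (not part of the original module): exercising
    # insert_on() against a plain list on a POSIX system; the egg path is
    # hypothetical. The egg is inserted just ahead of its parent directory:
    #   >>> d = Distribution.from_filename('/tmp/Demo-1.0-py3.8.egg')
    #   >>> path = ['/tmp']
    #   >>> d.insert_on(path)
    #   >>> path
    #   ['/tmp/Demo-1.0-py3.8.egg', '/tmp']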
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (
modname not in sys.modules
or modname in nsp
or modname in _namespace_packages
):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (
normalize_path(fn).startswith(loc) or fn.startswith(self.location)
):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
except SystemError:
# TODO: remove this except clause when python/cpython#103632 is fixed.
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy)
        use an old form of safe_version, so their version numbers
        can get mangled when converted to filenames (e.g.
        1.11.0.dev0+2329eae becomes 1.11.0.dev0_2329eae). Such
        distributions will not be parsed properly downstream by
        Distribution and safe_version, so take the extra step of
        reading the version number from the metadata file itself
        instead of the filename.
"""
md_version = self._get_version()
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""
Wrap an actual or potential sys.path entry
w/metadata, .dist-info style.
"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None)))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common]
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
def parse_requirements(strs):
"""
Yield ``Requirement`` objects for each specification in `strs`.
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs))))
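# Illustrative sketch (not part of the original module):
#   >>> [str(r) for r in parse_requirements("requests>=2.0\nfoo[bar]==1.0")]
#   ['requests>=2.0', 'foo[bar]==1.0']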
class RequirementParseError(packaging.requirements.InvalidRequirement):
"Compatibility wrapper for InvalidRequirement"
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
super(Requirement, self).__init__(requirement_string)
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.url,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return isinstance(other, Requirement) and self.hashCmp == other.hashCmp
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self):
return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
(req,) = parse_requirements(s)
return req
def _always_object(classes):
"""
Ensure object appears in the mro even
for old-style classes.
"""
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
for t in types:
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
os.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise OSError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
try:
mkdir(dirname, 0o755)
except FileExistsError:
pass
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
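# Illustrative sketch (not part of the original module):
#   >>> list(split_sections("a\nb\n[sec]\nc"))
#   [(None, ['a', 'b']), ('sec', ['c'])]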
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith('_')
)
class PkgResourcesDeprecationWarning(Warning):
"""
Base class for warning about deprecations in ``pkg_resources``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
tuple(dist.activate(replace=False) for dist in working_set)
add_activation_listener(
lambda dist: dist.activate(replace=True),
existing=False,
)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
| 109,343 | Python | .py | 2,699 | 31.761764 | 88 | 0.620729 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,876 | __init__.py | rembo10_headphones/lib/pkg_resources/extern/__init__.py |
import importlib.util
import sys
class VendorImporter:
"""
A PEP 302 meta path importer for finding optionally-vendored
or otherwise naturally-installed packages from root_name.
"""
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
self.root_name = root_name
self.vendored_names = set(vendored_names)
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
@property
def search_path(self):
"""
        Search the vendor package first, then fall back to the
        naturally-installed package.
"""
yield self.vendor_pkg + '.'
yield ''
def _module_matches_namespace(self, fullname):
"""Figure out if the target module is vendored."""
root, base, target = fullname.partition(self.root_name + '.')
return not root and any(map(target.startswith, self.vendored_names))
def load_module(self, fullname):
"""
Iterate over the search path to locate and load fullname.
"""
root, base, target = fullname.partition(self.root_name + '.')
for prefix in self.search_path:
try:
extant = prefix + target
__import__(extant)
mod = sys.modules[extant]
sys.modules[fullname] = mod
return mod
except ImportError:
pass
else:
raise ImportError(
"The '{target}' package is required; "
"normally this is bundled with this package so if you get "
"this warning, consult the packager of your "
"distribution.".format(**locals())
)
def create_module(self, spec):
return self.load_module(spec.name)
def exec_module(self, module):
pass
def find_spec(self, fullname, path=None, target=None):
"""Return a module spec for vendored names."""
return (
importlib.util.spec_from_loader(fullname, self)
if self._module_matches_namespace(fullname)
else None
)
def install(self):
"""
Install this importer into sys.meta_path if not already present.
"""
if self not in sys.meta_path:
sys.meta_path.append(self)
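# Illustrative note (not part of the original vendored file): once installed,
# an import such as
#   >>> from pkg_resources.extern import packaging
# is served from pkg_resources._vendor.packaging when the vendored copy is
# present, and otherwise falls back to a top-level 'packaging' install.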
names = (
'packaging',
'platformdirs',
'jaraco',
'importlib_resources',
'more_itertools',
)
VendorImporter(__name__, names).install()
| 2,442 | Python | .py | 68 | 26.852941 | 78 | 0.593141 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,877 | zipp.py | rembo10_headphones/lib/pkg_resources/_vendor/zipp.py |
import io
import posixpath
import zipfile
import itertools
import contextlib
import sys
import pathlib
if sys.version_info < (3, 7):
from collections import OrderedDict
else:
OrderedDict = dict
__all__ = ['Path']
def _parents(path):
"""
Given a path with elements separated by
posixpath.sep, generate all parents of that path.
>>> list(_parents('b/d'))
['b']
>>> list(_parents('/b/d/'))
['/b']
>>> list(_parents('b/d/f/'))
['b/d', 'b']
>>> list(_parents('b'))
[]
>>> list(_parents(''))
[]
"""
return itertools.islice(_ancestry(path), 1, None)
def _ancestry(path):
"""
Given a path with elements separated by
posixpath.sep, generate all elements of that path
>>> list(_ancestry('b/d'))
['b/d', 'b']
>>> list(_ancestry('/b/d/'))
['/b/d', '/b']
>>> list(_ancestry('b/d/f/'))
['b/d/f', 'b/d', 'b']
>>> list(_ancestry('b'))
['b']
>>> list(_ancestry(''))
[]
"""
path = path.rstrip(posixpath.sep)
while path and path != posixpath.sep:
yield path
path, tail = posixpath.split(path)
_dedupe = OrderedDict.fromkeys
"""Deduplicate an iterable in original order"""
def _difference(minuend, subtrahend):
"""
Return items in minuend not in subtrahend, retaining order
with O(1) lookup.
"""
return itertools.filterfalse(set(subtrahend).__contains__, minuend)
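# Illustrative sketch (not part of the original vendored file):
#   >>> list(_difference(['a', 'b', 'c'], ['b']))
#   ['a', 'c']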
class CompleteDirs(zipfile.ZipFile):
"""
A ZipFile subclass that ensures that implied directories
are always included in the namelist.
"""
@staticmethod
def _implied_dirs(names):
parents = itertools.chain.from_iterable(map(_parents, names))
as_dirs = (p + posixpath.sep for p in parents)
return _dedupe(_difference(as_dirs, names))
def namelist(self):
names = super(CompleteDirs, self).namelist()
return names + list(self._implied_dirs(names))
def _name_set(self):
return set(self.namelist())
def resolve_dir(self, name):
"""
If the name represents a directory, return that name
as a directory (with the trailing slash).
"""
names = self._name_set()
dirname = name + '/'
dir_match = name not in names and dirname in names
return dirname if dir_match else name
@classmethod
def make(cls, source):
"""
Given a source (filename or zipfile), return an
appropriate CompleteDirs subclass.
"""
if isinstance(source, CompleteDirs):
return source
if not isinstance(source, zipfile.ZipFile):
return cls(_pathlib_compat(source))
# Only allow for FastLookup when supplied zipfile is read-only
if 'r' not in source.mode:
cls = CompleteDirs
source.__class__ = cls
return source
class FastLookup(CompleteDirs):
"""
ZipFile subclass to ensure implicit
dirs exist and are resolved rapidly.
"""
def namelist(self):
with contextlib.suppress(AttributeError):
return self.__names
self.__names = super(FastLookup, self).namelist()
return self.__names
def _name_set(self):
with contextlib.suppress(AttributeError):
return self.__lookup
self.__lookup = super(FastLookup, self)._name_set()
return self.__lookup
def _pathlib_compat(path):
"""
For path-like objects, convert to a filename for compatibility
on Python 3.6.1 and earlier.
"""
try:
return path.__fspath__()
except AttributeError:
return str(path)
class Path:
"""
A pathlib-compatible interface for zip files.
Consider a zip file with this structure::
.
├── a.txt
└── b
├── c.txt
└── d
└── e.txt
>>> data = io.BytesIO()
>>> zf = zipfile.ZipFile(data, 'w')
>>> zf.writestr('a.txt', 'content of a')
>>> zf.writestr('b/c.txt', 'content of c')
>>> zf.writestr('b/d/e.txt', 'content of e')
>>> zf.filename = 'mem/abcde.zip'
Path accepts the zipfile object itself or a filename
>>> root = Path(zf)
From there, several path operations are available.
Directory iteration (including the zip file itself):
>>> a, b = root.iterdir()
>>> a
Path('mem/abcde.zip', 'a.txt')
>>> b
Path('mem/abcde.zip', 'b/')
name property:
>>> b.name
'b'
join with divide operator:
>>> c = b / 'c.txt'
>>> c
Path('mem/abcde.zip', 'b/c.txt')
>>> c.name
'c.txt'
Read text:
>>> c.read_text()
'content of c'
existence:
>>> c.exists()
True
>>> (b / 'missing.txt').exists()
False
Coercion to string:
>>> import os
>>> str(c).replace(os.sep, posixpath.sep)
'mem/abcde.zip/b/c.txt'
At the root, ``name``, ``filename``, and ``parent``
resolve to the zipfile. Note these attributes are not
valid and will raise a ``ValueError`` if the zipfile
has no filename.
>>> root.name
'abcde.zip'
>>> str(root.filename).replace(os.sep, posixpath.sep)
'mem/abcde.zip'
>>> str(root.parent)
'mem'
"""
__repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
def __init__(self, root, at=""):
"""
Construct a Path from a ZipFile or filename.
Note: When the source is an existing ZipFile object,
its type (__class__) will be mutated to a
specialized type. If the caller wishes to retain the
original type, the caller should either create a
separate ZipFile object or pass a filename.
"""
self.root = FastLookup.make(root)
self.at = at
def open(self, mode='r', *args, pwd=None, **kwargs):
"""
Open this entry as text or binary following the semantics
of ``pathlib.Path.open()`` by passing arguments through
to io.TextIOWrapper().
"""
if self.is_dir():
raise IsADirectoryError(self)
zip_mode = mode[0]
if not self.exists() and zip_mode == 'r':
raise FileNotFoundError(self)
stream = self.root.open(self.at, zip_mode, pwd=pwd)
if 'b' in mode:
if args or kwargs:
raise ValueError("encoding args invalid for binary operation")
return stream
return io.TextIOWrapper(stream, *args, **kwargs)
@property
def name(self):
return pathlib.Path(self.at).name or self.filename.name
@property
def suffix(self):
return pathlib.Path(self.at).suffix or self.filename.suffix
@property
def suffixes(self):
return pathlib.Path(self.at).suffixes or self.filename.suffixes
@property
def stem(self):
return pathlib.Path(self.at).stem or self.filename.stem
@property
def filename(self):
return pathlib.Path(self.root.filename).joinpath(self.at)
def read_text(self, *args, **kwargs):
with self.open('r', *args, **kwargs) as strm:
return strm.read()
def read_bytes(self):
with self.open('rb') as strm:
return strm.read()
def _is_child(self, path):
return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
def _next(self, at):
return self.__class__(self.root, at)
def is_dir(self):
return not self.at or self.at.endswith("/")
def is_file(self):
return self.exists() and not self.is_dir()
def exists(self):
return self.at in self.root._name_set()
def iterdir(self):
if not self.is_dir():
raise ValueError("Can't listdir a file")
subs = map(self._next, self.root.namelist())
return filter(self._is_child, subs)
def __str__(self):
return posixpath.join(self.root.filename, self.at)
def __repr__(self):
return self.__repr.format(self=self)
def joinpath(self, *other):
next = posixpath.join(self.at, *map(_pathlib_compat, other))
return self._next(self.root.resolve_dir(next))
__truediv__ = joinpath
@property
def parent(self):
if not self.at:
return self.filename.parent
parent_at = posixpath.dirname(self.at.rstrip('/'))
if parent_at:
parent_at += '/'
return self._next(parent_at)
| 8,425 | Python | .py | 256 | 25.945313 | 78 | 0.603521 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,878 | typing_extensions.py | rembo10_headphones/lib/pkg_resources/_vendor/typing_extensions.py |
import abc
import collections
import collections.abc
import functools
import operator
import sys
import types as _types
import typing
__all__ = [
# Super-special typing primitives.
'Any',
'ClassVar',
'Concatenate',
'Final',
'LiteralString',
'ParamSpec',
'ParamSpecArgs',
'ParamSpecKwargs',
'Self',
'Type',
'TypeVar',
'TypeVarTuple',
'Unpack',
# ABCs (from collections.abc).
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'AsyncGenerator',
'AsyncContextManager',
'ChainMap',
# Concrete collection types.
'ContextManager',
'Counter',
'Deque',
'DefaultDict',
'NamedTuple',
'OrderedDict',
'TypedDict',
# Structural checks, a.k.a. protocols.
'SupportsIndex',
# One-off things.
'Annotated',
'assert_never',
'assert_type',
'clear_overloads',
'dataclass_transform',
'get_overloads',
'final',
'get_args',
'get_origin',
'get_type_hints',
'IntVar',
'is_typeddict',
'Literal',
'NewType',
'overload',
'override',
'Protocol',
'reveal_type',
'runtime',
'runtime_checkable',
'Text',
'TypeAlias',
'TypeGuard',
'TYPE_CHECKING',
'Never',
'NoReturn',
'Required',
'NotRequired',
]
# for backward compatibility
PEP_560 = True
GenericMeta = type
# The functions below are modified copies of typing internal helpers.
# They are needed by _ProtocolMeta and they provide support for PEP 646.
_marker = object()
def _check_generic(cls, parameters, elen=_marker):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
if elen is _marker:
if not hasattr(cls, "__parameters__") or not cls.__parameters__:
raise TypeError(f"{cls} is not a generic class")
elen = len(cls.__parameters__)
alen = len(parameters)
if alen != elen:
if hasattr(cls, "__parameters__"):
parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
return
raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
f" actual {alen}, expected {elen}")
if sys.version_info >= (3, 10):
def _should_collect_from_parameters(t):
return isinstance(
t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
)
elif sys.version_info >= (3, 9):
def _should_collect_from_parameters(t):
return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
else:
def _should_collect_from_parameters(t):
return isinstance(t, typing._GenericAlias) and not t._special
def _collect_type_vars(types, typevar_types=None):
"""Collect all type variable contained in types in order of
first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
"""
if typevar_types is None:
typevar_types = typing.TypeVar
tvars = []
for t in types:
if (
isinstance(t, typevar_types) and
t not in tvars and
not _is_unpack(t)
):
tvars.append(t)
if _should_collect_from_parameters(t):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
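# Illustrative sketch (not part of the original vendored file):
#   >>> A = typing.TypeVar('A'); B = typing.TypeVar('B')
#   >>> _collect_type_vars((A, typing.List[B], A))
#   (~A, ~B)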
NoReturn = typing.NoReturn
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T') # Any type.
KT = typing.TypeVar('KT') # Key type.
VT = typing.TypeVar('VT') # Value type.
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
if sys.version_info >= (3, 11):
from typing import Any
else:
class _AnyMeta(type):
def __instancecheck__(self, obj):
if self is Any:
raise TypeError("typing_extensions.Any cannot be used with isinstance()")
return super().__instancecheck__(obj)
def __repr__(self):
if self is Any:
return "typing_extensions.Any"
return super().__repr__()
class Any(metaclass=_AnyMeta):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
        - Any is assumed to have all methods.
        - All values are assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
checks.
"""
def __new__(cls, *args, **kwargs):
if cls is Any:
raise TypeError("Any cannot be instantiated")
return super().__new__(cls, *args, **kwargs)
ClassVar = typing.ClassVar
# On older versions of typing there is an internal class named "Final".
# 3.8+
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
Final = typing.Final
# 3.7
else:
class _FinalForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
Final = _FinalForm('Final',
doc="""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.""")
if sys.version_info >= (3, 11):
final = typing.final
else:
# @final exists in 3.8+, but we backport it for all versions
# before 3.11 to keep support for the __final__ attribute.
# See https://bugs.python.org/issue46342
def final(f):
"""This decorator can be used to indicate to type checkers that
the decorated method cannot be overridden, and decorated class
cannot be subclassed. For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties. The decorator
sets the ``__final__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
"""
try:
f.__final__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return f
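# Editor's illustrative sketch (not part of the vendored module; the helper
# name ``_example_final`` is hypothetical). ``final`` does nothing at runtime
# beyond setting ``__final__`` where writable, which can be observed directly:
def _example_final():
    @final
    class Leaf:
        pass
    # True, because ordinary classes accept attribute assignment.
    return getattr(Leaf, "__final__", False)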
def IntVar(name):
return typing.TypeVar(name)
# 3.8+:
if hasattr(typing, 'Literal'):
Literal = typing.Literal
# 3.7:
else:
class _LiteralForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return typing._GenericAlias(self, parameters)
Literal = _LiteralForm('Literal',
doc="""A type that can be used to indicate to type checkers
that the corresponding value has a value literally equivalent
to the provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to
the value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime
checking verifying that the parameter is actually a value
instead of a type.""")
_overload_dummy = typing._overload_dummy # noqa
if hasattr(typing, "get_overloads"): # 3.11+
overload = typing.overload
get_overloads = typing.get_overloads
clear_overloads = typing.clear_overloads
else:
# {module: {qualname: {firstlineno: func}}}
_overload_registry = collections.defaultdict(
functools.partial(collections.defaultdict, dict)
)
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
The overloads for a function can be retrieved at runtime using the
get_overloads() function.
"""
# classmethod and staticmethod
f = getattr(func, "__func__", func)
try:
_overload_registry[f.__module__][f.__qualname__][
f.__code__.co_firstlineno
] = func
except AttributeError:
# Not a normal function; ignore.
pass
return _overload_dummy
def get_overloads(func):
"""Return all defined overloads for *func* as a sequence."""
# classmethod and staticmethod
f = getattr(func, "__func__", func)
if f.__module__ not in _overload_registry:
return []
mod_dict = _overload_registry[f.__module__]
if f.__qualname__ not in mod_dict:
return []
return list(mod_dict[f.__qualname__].values())
def clear_overloads():
"""Clear all overloads in the registry."""
_overload_registry.clear()
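# Editor's illustrative sketch (not part of the vendored module; the names
# ``_example_overload`` and ``double`` are hypothetical). Overloads registered
# through this module can be recovered with ``get_overloads``:
def _example_overload():
    @overload
    def double(x: int) -> int: ...
    @overload
    def double(x: str) -> str: ...
    def double(x):
        return x * 2
    # The two stub definitions are returned; the implementation is not.
    return len(get_overloads(double))  # -> 2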
# This is not a real generic class. Don't use outside annotations.
Type = typing.Type
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
Awaitable = typing.Awaitable
Coroutine = typing.Coroutine
AsyncIterable = typing.AsyncIterable
AsyncIterator = typing.AsyncIterator
Deque = typing.Deque
ContextManager = typing.ContextManager
AsyncContextManager = typing.AsyncContextManager
DefaultDict = typing.DefaultDict
# 3.7.2+
if hasattr(typing, 'OrderedDict'):
OrderedDict = typing.OrderedDict
# 3.7.0-3.7.2
else:
OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
Counter = typing.Counter
ChainMap = typing.ChainMap
AsyncGenerator = typing.AsyncGenerator
NewType = typing.NewType
Text = typing.Text
TYPE_CHECKING = typing.TYPE_CHECKING
_PROTO_WHITELIST = ['Callable', 'Awaitable',
'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
'ContextManager', 'AsyncContextManager']
def _get_protocol_attrs(cls):
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if (not attr.startswith('_abc_') and attr not in (
'__abstractmethods__', '__annotations__', '__weakref__',
'_is_protocol', '_is_runtime_protocol', '__dict__',
'__args__', '__slots__',
'__next_in_mro__', '__parameters__', '__origin__',
'__orig_bases__', '__extra__', '__tree_hash__',
'__doc__', '__subclasshook__', '__init__', '__new__',
'__module__', '_MutableMapping__marker', '_gorg')):
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
def _maybe_adjust_parameters(cls):
"""Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__.
The contents of this function are very similar
to logic found in typing.Generic.__init_subclass__
on the CPython main branch.
"""
tvars = []
if '__orig_bases__' in cls.__dict__:
tvars = typing._collect_type_vars(cls.__orig_bases__)
# Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...] and/or Protocol[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, typing._GenericAlias) and
base.__origin__ in (typing.Generic, Protocol)):
# for error messages
the_base = base.__origin__.__name__
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...]"
" and/or Protocol[...] multiple types.")
gvars = base.__parameters__
if gvars is None:
gvars = tvars
else:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in {the_base}[{s_args}]")
tvars = gvars
cls.__parameters__ = tuple(tvars)
# 3.8+
if hasattr(typing, 'Protocol'):
Protocol = typing.Protocol
# 3.7
else:
def _no_init(self, *args, **kwargs):
if type(self)._is_protocol:
raise TypeError('Protocols cannot be instantiated')
class _ProtocolMeta(abc.ABCMeta): # noqa: B024
# This metaclass is a bit unfortunate and exists only because of the lack
# of __instancehook__.
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if ((not getattr(cls, '_is_protocol', False) or
_is_callable_members_only(cls)) and
issubclass(instance.__class__, cls)):
return True
if cls._is_protocol:
if all(hasattr(instance, attr) and
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(cls)):
return True
return super().__instancecheck__(instance)
class Protocol(metaclass=_ProtocolMeta):
# There is quite a lot of overlapping code with typing.Generic.
# Unfortunately it is hard to avoid this while these live in two different
# modules. The duplicated code will be removed when Protocol is moved to typing.
"""Base class for protocol classes. Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
        @typing_extensions.runtime act as simple-minded runtime protocols that check
only the presence of given attributes, ignoring their type signatures.
Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
def __new__(cls, *args, **kwds):
if cls is Protocol:
raise TypeError("Type Protocol cannot be instantiated; "
"it can only be used as a base class")
return super().__new__(cls)
@typing._tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not typing.Tuple:
raise TypeError(
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
msg = "Parameters to generic types must be types."
params = tuple(typing._type_check(p, msg) for p in params) # noqa
if cls is Protocol:
# Generic can only be subscripted with unique type variables.
if not all(isinstance(p, typing.TypeVar) for p in params):
i = 0
while isinstance(params[i], typing.TypeVar):
i += 1
raise TypeError(
"Parameters to Protocol[...] must all be type variables."
f" Parameter {i + 1} is {params[i]}")
if len(set(params)) != len(params):
raise TypeError(
"Parameters to Protocol[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
_check_generic(cls, params, len(cls.__parameters__))
return typing._GenericAlias(cls, params)
def __init_subclass__(cls, *args, **kwargs):
if '__orig_bases__' in cls.__dict__:
error = typing.Generic in cls.__orig_bases__
else:
error = typing.Generic in cls.__bases__
if error:
raise TypeError("Cannot inherit from plain Generic")
_maybe_adjust_parameters(cls)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', None):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', None):
return NotImplemented
if not getattr(cls, '_is_runtime_protocol', False):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime protocols")
if not _is_callable_members_only(cls):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error as for issubclass(1, int)
raise TypeError('issubclass() arg 1 must be a class')
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, typing.Mapping) and
attr in annotations and
isinstance(other, _ProtocolMeta) and
other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols.
if not cls._is_protocol:
return
# Check consistency of bases.
for base in cls.__bases__:
if not (base in (object, typing.Generic) or
base.__module__ == 'collections.abc' and
base.__name__ in _PROTO_WHITELIST or
isinstance(base, _ProtocolMeta) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
f' protocols, got {repr(base)}')
cls.__init__ = _no_init
# 3.8+
if hasattr(typing, 'runtime_checkable'):
runtime_checkable = typing.runtime_checkable
# 3.7
else:
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol, so that it
can be used with isinstance() and issubclass(). Raise TypeError
if applied to a non-protocol class.
This allows a simple-minded structural check very similar to the
one-offs in collections.abc such as Hashable.
"""
if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
f' got {cls!r}')
cls._is_runtime_protocol = True
return cls
# Exists for backwards compatibility.
runtime = runtime_checkable
# 3.8+
if hasattr(typing, 'SupportsIndex'):
SupportsIndex = typing.SupportsIndex
# 3.7
else:
@runtime_checkable
class SupportsIndex(Protocol):
__slots__ = ()
@abc.abstractmethod
def __index__(self) -> int:
pass
if hasattr(typing, "Required"):
# The standard library TypedDict in Python 3.8 does not store runtime information
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
# The standard library TypedDict below Python 3.11 does not store runtime
# information about optional and required keys when using Required or NotRequired.
# Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
TypedDict = typing.TypedDict
_TypedDictMeta = typing._TypedDictMeta
is_typeddict = typing.is_typeddict
else:
def _check_fails(cls, other):
try:
if sys._getframe(1).f_globals['__name__'] not in ['abc',
'functools',
'typing']:
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
except (AttributeError, ValueError):
pass
return False
def _dict_new(*args, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
return dict(*args, **kwargs)
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
def _typeddict_new(*args, total=True, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
if args:
            typename, args = args[0], args[1:]  # allow the "_typename" keyword to be passed
elif '_typename' in kwargs:
typename = kwargs.pop('_typename')
import warnings
warnings.warn("Passing '_typename' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
raise TypeError("TypedDict.__new__() missing 1 required positional "
"argument: '_typename'")
if args:
try:
                fields, = args  # allow the "_fields" keyword to be passed
except ValueError:
raise TypeError('TypedDict.__new__() takes from 2 to 3 '
f'positional arguments but {len(args) + 2} '
'were given')
elif '_fields' in kwargs and len(kwargs) == 1:
fields = kwargs.pop('_fields')
import warnings
warnings.warn("Passing '_fields' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
fields = None
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
try:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return _TypedDictMeta(typename, (), ns, total=total)
_typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
' /, *, total=True, **kwargs)')
class _TypedDictMeta(type):
def __init__(cls, name, bases, ns, total=True):
super().__init__(name, bases, ns)
def __new__(cls, name, bases, ns, total=True):
# Create new typed dict class object.
# This method is called directly when TypedDict is subclassed,
# or via _typeddict_new when TypedDict is instantiated. This way
# TypedDict supports all three syntaxes described in its docstring.
# Subclasses and instances of TypedDict return actual dictionaries
# via _dict_new.
ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
# Don't insert typing.Generic into __bases__ here,
# or Generic.__init_subclass__ will raise TypeError
# in the super().__new__() call.
# Instead, monkey-patch __bases__ onto the class after it's been created.
tp_dict = super().__new__(cls, name, (dict,), ns)
if any(issubclass(base, typing.Generic) for base in bases):
tp_dict.__bases__ = (typing.Generic, dict)
_maybe_adjust_parameters(tp_dict)
annotations = {}
own_annotations = ns.get('__annotations__', {})
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
own_annotations = {
n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
for base in bases:
annotations.update(base.__dict__.get('__annotations__', {}))
required_keys.update(base.__dict__.get('__required_keys__', ()))
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
annotations.update(own_annotations)
for annotation_key, annotation_type in own_annotations.items():
annotation_origin = get_origin(annotation_type)
if annotation_origin is Annotated:
annotation_args = get_args(annotation_type)
if annotation_args:
annotation_type = annotation_args[0]
annotation_origin = get_origin(annotation_type)
if annotation_origin is Required:
required_keys.add(annotation_key)
elif annotation_origin is NotRequired:
optional_keys.add(annotation_key)
elif total:
required_keys.add(annotation_key)
else:
optional_keys.add(annotation_key)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
__instancecheck__ = __subclasscheck__ = _check_fails
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
TypedDict.__module__ = __name__
TypedDict.__doc__ = \
"""A simple typed name space. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, with each key
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if hasattr(typing, "_TypedDictMeta"):
_TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
else:
_TYPEDDICT_TYPES = (_TypedDictMeta,)
def is_typeddict(tp):
"""Check if an annotation is a TypedDict class
For example::
class Film(TypedDict):
title: str
year: int
is_typeddict(Film) # => True
is_typeddict(Union[list, str]) # => False
"""
return isinstance(tp, tuple(_TYPEDDICT_TYPES))
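# Editor's illustrative sketch (not part of the vendored module; the names
# ``_example_typeddict`` and ``Point`` are hypothetical). At runtime a
# TypedDict instance is a plain dict; only type checkers enforce the keys:
def _example_typeddict():
    class Point(TypedDict):
        x: int
        y: int
    assert is_typeddict(Point) and not is_typeddict(dict)
    return Point(x=1, y=2) == {"x": 1, "y": 2}  # -> True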
if hasattr(typing, "assert_type"):
assert_type = typing.assert_type
else:
def assert_type(__val, __typ):
"""Assert (to the type checker) that the value is of the given type.
When the type checker encounters a call to assert_type(), it
emits an error if the value is not of the specified type::
def greet(name: str) -> None:
assert_type(name, str) # ok
assert_type(name, int) # type checker error
At runtime this returns the first argument unchanged and otherwise
does nothing.
"""
return __val
if hasattr(typing, "Required"):
get_type_hints = typing.get_type_hints
else:
import functools
import types
# replaces _strip_annotations()
def _strip_extras(t):
"""Strips Annotated, Required and NotRequired from a given type."""
if isinstance(t, _AnnotatedAlias):
return _strip_extras(t.__origin__)
if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
return _strip_extras(t.__args__[0])
if isinstance(t, typing._GenericAlias):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return t.copy_with(stripped_args)
if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return types.GenericAlias(t.__origin__, stripped_args)
if hasattr(types, "UnionType") and isinstance(t, types.UnionType):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return functools.reduce(operator.or_, stripped_args)
return t
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
(unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if hasattr(typing, "Annotated"):
hint = typing.get_type_hints(
obj, globalns=globalns, localns=localns, include_extras=True
)
else:
hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
if include_extras:
return hint
return {k: _strip_extras(t) for k, t in hint.items()}
# Python 3.9+ has PEP 593 (Annotated)
if hasattr(typing, 'Annotated'):
Annotated = typing.Annotated
# Not exported and not a public API, but needed for get_origin() and get_args()
# to work.
_AnnotatedAlias = typing._AnnotatedAlias
# 3.7-3.8
else:
class _AnnotatedAlias(typing._GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
        instantiating it is the same as instantiating the underlying type, and
        binding it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
f"{', '.join(repr(a) for a in self.__metadata__)}]")
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__,) + self.__metadata__
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
if self.__origin__ != other.__origin__:
return False
return self.__metadata__ == other.__metadata__
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
class Annotated:
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type (and will be in
the __origin__ field), the remaining arguments are kept as a tuple in
the __extra__ field.
Details:
        - It's an error to call `Annotated` with fewer than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise TypeError("Type Annotated cannot be instantiated.")
@typing._tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be used "
"with at least two arguments (a type and an "
"annotation).")
allowed_special_forms = (ClassVar, Final)
if get_origin(params[0]) in allowed_special_forms:
origin = params[0]
else:
msg = "Annotated[t, ...]: t must be a type."
origin = typing._type_check(params[0], msg)
metadata = tuple(params[1:])
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
f"Cannot subclass {cls.__module__}.Annotated"
)
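# Editor's illustrative sketch (not part of the vendored module; the names
# ``_example_annotated`` and ``scale`` are hypothetical). ``get_type_hints``
# strips Annotated metadata unless ``include_extras=True`` is passed:
def _example_annotated():
    def scale(x: Annotated[int, "metres"]) -> int:
        return x * 2
    plain = get_type_hints(scale)["x"]                       # int
    extra = get_type_hints(scale, include_extras=True)["x"]  # Annotated[int, 'metres']
    return plain, extra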
# Python 3.8 has get_origin() and get_args() but those implementations aren't
# Annotated-aware, so we can't use those. Python 3.9's versions don't support
# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
# 3.7-3.9
else:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = typing._GenericAlias
try:
# 3.9+
from typing import GenericAlias as _typing_GenericAlias
except ImportError:
_typing_GenericAlias = typing._GenericAlias
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is typing.Generic:
return typing.Generic
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
if getattr(tp, "_special", False):
return ()
res = tp.__args__
if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return ()
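# Editor's illustrative sketch (not part of the vendored module; the helper
# name ``_example_introspection`` is hypothetical). These helpers are the
# Annotated-aware counterparts of the 3.10+ stdlib functions:
def _example_introspection():
    assert get_origin(Literal[42]) is Literal
    assert get_origin(int) is None
    assert get_args(typing.Dict[str, int]) == (str, int)
    assert get_args(Annotated[int, "metres"]) == (int, "metres")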
# 3.10+
if hasattr(typing, 'TypeAlias'):
TypeAlias = typing.TypeAlias
# 3.9
elif sys.version_info[:2] >= (3, 9):
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_TypeAliasForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable")
# 3.7-3.8
else:
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
TypeAlias = _TypeAliasForm('TypeAlias',
doc="""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example
above.""")
class _DefaultMixin:
"""Mixin for TypeVarLike defaults."""
__slots__ = ()
def __init__(self, default):
if isinstance(default, (tuple, list)):
self.__default__ = tuple((typing._type_check(d, "Default must be a type")
for d in default))
elif default:
self.__default__ = typing._type_check(default, "Default must be a type")
else:
self.__default__ = None
# Add default and infer_variance parameters from PEP 696 and 695
class TypeVar(typing.TypeVar, _DefaultMixin, _root=True):
"""Type variable."""
__module__ = 'typing'
def __init__(self, name, *constraints, bound=None,
covariant=False, contravariant=False,
default=None, infer_variance=False):
super().__init__(name, *constraints, bound=bound, covariant=covariant,
contravariant=contravariant)
_DefaultMixin.__init__(self, default)
self.__infer_variance__ = infer_variance
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
# Python 3.10+ has PEP 612
if hasattr(typing, 'ParamSpecArgs'):
ParamSpecArgs = typing.ParamSpecArgs
ParamSpecKwargs = typing.ParamSpecKwargs
# 3.7-3.9
else:
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
class ParamSpecArgs(_Immutable):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
def __eq__(self, other):
if not isinstance(other, ParamSpecArgs):
return NotImplemented
return self.__origin__ == other.__origin__
class ParamSpecKwargs(_Immutable):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
def __eq__(self, other):
if not isinstance(other, ParamSpecKwargs):
return NotImplemented
return self.__origin__ == other.__origin__
# 3.10+
if hasattr(typing, 'ParamSpec'):
# Add default Parameter - PEP 696
class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True):
"""Parameter specification variable."""
__module__ = 'typing'
def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
default=None):
super().__init__(name, bound=bound, covariant=covariant,
contravariant=contravariant)
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
# 3.7-3.9
else:
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class ParamSpec(list, _DefaultMixin):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
        or as the first argument to ``Callable``. In Python 3.10 and higher,
they are also supported in user-defined Generics at runtime.
See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
           P.__name__ == 'P'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
default=None):
super().__init__([self])
self.__name__ = name
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if bound:
self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
else:
self.__bound__ = None
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
# Hack to get typing._type_check to pass.
def __call__(self, *args, **kwargs):
pass
# 3.7-3.9
if not hasattr(typing, 'Concatenate'):
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class _ConcatenateGenericAlias(list):
# Trick Generic into looking into this for __parameters__.
__class__ = typing._GenericAlias
# Flag in 3.8.
_special = False
def __init__(self, origin, args):
super().__init__(args)
self.__origin__ = origin
self.__args__ = args
def __repr__(self):
_type_repr = typing._type_repr
return (f'{_type_repr(self.__origin__)}'
f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
def __hash__(self):
return hash((self.__origin__, self.__args__))
# Hack to get typing._type_check to pass in Generic.
def __call__(self, *args, **kwargs):
pass
@property
def __parameters__(self):
return tuple(
tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
)
# 3.7-3.9
@typing._tp_cache
def _concatenate_getitem(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = tuple(typing._type_check(p, msg) for p in parameters)
return _ConcatenateGenericAlias(self, parameters)
# 3.10+
if hasattr(typing, 'Concatenate'):
Concatenate = typing.Concatenate
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_TypeAliasForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
return _concatenate_getitem(self, parameters)
# 3.7-8
else:
class _ConcatenateForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
Concatenate = _ConcatenateForm(
'Concatenate',
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
# 3.10+
if hasattr(typing, 'TypeGuard'):
TypeGuard = typing.TypeGuard
# 3.9
elif sys.version_info[:2] >= (3, 9):
class _TypeGuardForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_TypeGuardForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,))
# 3.7-3.8
else:
class _TypeGuardForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
TypeGuard = _TypeGuardForm(
'TypeGuard',
doc="""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
""")
# Vendored from cpython typing._SpecialFrom
class _SpecialForm(typing._Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return f'typing_extensions.{self._name}'
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return typing.Union[self, other]
def __ror__(self, other):
return typing.Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@typing._tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
if hasattr(typing, "LiteralString"):
LiteralString = typing.LiteralString
else:
@_SpecialForm
def LiteralString(self, params):
"""Represents an arbitrary literal string.
Example::
from typing_extensions import LiteralString
def query(sql: LiteralString) -> ...:
...
query("SELECT * FROM table") # ok
query(f"SELECT * FROM {input()}") # not ok
See PEP 675 for details.
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Self"):
Self = typing.Self
else:
@_SpecialForm
def Self(self, params):
"""Used to spell the type of "self" in classes.
Example::
from typing import Self
class ReturnsSelf:
def parse(self, data: bytes) -> Self:
...
return self
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Never"):
Never = typing.Never
else:
@_SpecialForm
def Never(self, params):
"""The bottom type, a type that has no members.
This can be used to define a function that should never be
called, or a function that never returns::
from typing_extensions import Never
def never_call_me(arg: Never) -> None:
pass
def int_or_str(arg: int | str) -> None:
never_call_me(arg) # type checker error
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
never_call_me(arg) # ok, arg is of type Never
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, 'Required'):
Required = typing.Required
NotRequired = typing.NotRequired
elif sys.version_info[:2] >= (3, 9):
class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_ExtensionsSpecialForm
def Required(self, parameters):
"""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
@_ExtensionsSpecialForm
def NotRequired(self, parameters):
"""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
else:
class _RequiredForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
Required = _RequiredForm(
'Required',
doc="""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
""")
NotRequired = _RequiredForm(
'NotRequired',
doc="""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
""")
if hasattr(typing, "Unpack"): # 3.11+
Unpack = typing.Unpack
elif sys.version_info[:2] >= (3, 9):
class _UnpackSpecialForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
@_UnpackSpecialForm
def Unpack(self, parameters):
"""A special typing construct to unpack a variadic type. For example:
Shape = TypeVarTuple('Shape')
Batch = NewType('Batch', int)
def add_batch_axis(
x: Array[Unpack[Shape]]
) -> Array[Batch, Unpack[Shape]]: ...
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
else:
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
class _UnpackForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
Unpack = _UnpackForm(
'Unpack',
doc="""A special typing construct to unpack a variadic type. For example:
Shape = TypeVarTuple('Shape')
Batch = NewType('Batch', int)
def add_batch_axis(
x: Array[Unpack[Shape]]
) -> Array[Batch, Unpack[Shape]]: ...
""")
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
if hasattr(typing, "TypeVarTuple"): # 3.11+
# Add default Parameter - PEP 696
class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True):
"""Type variable tuple."""
def __init__(self, name, *, default=None):
super().__init__(name)
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
else:
class TypeVarTuple(_DefaultMixin):
"""Type variable tuple.
Usage::
Ts = TypeVarTuple('Ts')
In the same way that a normal type variable is a stand-in for a single
type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
type such as ``Tuple[int, str]``.
Type variable tuples can be used in ``Generic`` declarations.
Consider the following example::
class Array(Generic[*Ts]): ...
The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
where ``T1`` and ``T2`` are type variables. To use these type variables
as type parameters of ``Array``, we must *unpack* the type variable tuple using
the star operator: ``*Ts``. The signature of ``Array`` then behaves
as if we had simply written ``class Array(Generic[T1, T2]): ...``.
        In contrast to ``Generic[T1, T2]``, however, ``Generic[*Ts]`` allows
us to parameterise the class with an *arbitrary* number of type parameters.
Type variable tuples can be used anywhere a normal ``TypeVar`` can.
This includes class definitions, as shown above, as well as function
signatures and variable annotations::
class Array(Generic[*Ts]):
def __init__(self, shape: Tuple[*Ts]):
self._shape: Tuple[*Ts] = shape
def get_shape(self) -> Tuple[*Ts]:
return self._shape
shape = (Height(480), Width(640))
x: Array[Height, Width] = Array(shape)
y = abs(x) # Inferred type is Array[Height, Width]
z = x + x # ... is Array[Height, Width]
x.get_shape() # ... is tuple[Height, Width]
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
def __iter__(self):
yield self.__unpacked__
def __init__(self, name, *, default=None):
self.__name__ = name
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
self.__unpacked__ = Unpack[self]
def __repr__(self):
return self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
def __init_subclass__(self, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
if hasattr(typing, "reveal_type"):
reveal_type = typing.reveal_type
else:
def reveal_type(__obj: T) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr)
return __obj
if hasattr(typing, "assert_never"):
assert_never = typing.assert_never
else:
def assert_never(__arg: Never) -> Never:
"""Assert to the type checker that a line of code is unreachable.
Example::
def int_or_str(arg: int | str) -> None:
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
assert_never(arg)
If a type checker finds that a call to assert_never() is
reachable, it will emit an error.
At runtime, this throws an exception when called.
"""
raise AssertionError("Expected code to be unreachable")
if hasattr(typing, 'dataclass_transform'):
dataclass_transform = typing.dataclass_transform
else:
def dataclass_transform(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
field_specifiers: typing.Tuple[
typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
...
] = (),
**kwargs: typing.Any,
) -> typing.Callable[[T], T]:
"""Decorator that marks a function, class, or metaclass as providing
dataclass-like behavior.
Example:
from typing_extensions import dataclass_transform
_T = TypeVar("_T")
# Used on a decorator function
@dataclass_transform()
def create_model(cls: type[_T]) -> type[_T]:
...
return cls
@create_model
class CustomerModel:
id: int
name: str
# Used on a base class
@dataclass_transform()
class ModelBase: ...
class CustomerModel(ModelBase):
id: int
name: str
# Used on a metaclass
@dataclass_transform()
class ModelMeta(type): ...
class ModelBase(metaclass=ModelMeta): ...
class CustomerModel(ModelBase):
id: int
name: str
Each of the ``CustomerModel`` classes defined in this example will now
behave similarly to a dataclass created with the ``@dataclasses.dataclass``
decorator. For example, the type checker will synthesize an ``__init__``
method.
The arguments to this decorator can be used to customize this behavior:
- ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
True or False if it is omitted by the caller.
- ``order_default`` indicates whether the ``order`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``kw_only_default`` indicates whether the ``kw_only`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``field_specifiers`` specifies a static list of supported classes
or functions that describe fields, similar to ``dataclasses.field()``.
At runtime, this decorator records its arguments in the
``__dataclass_transform__`` attribute on the decorated object.
See PEP 681 for details.
"""
def decorator(cls_or_fn):
cls_or_fn.__dataclass_transform__ = {
"eq_default": eq_default,
"order_default": order_default,
"kw_only_default": kw_only_default,
"field_specifiers": field_specifiers,
"kwargs": kwargs,
}
return cls_or_fn
return decorator
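# Editor's illustrative sketch (not part of the vendored module; the names
# ``_example_dataclass_transform`` and ``make_model`` are hypothetical). The
# decorator's arguments are recorded for runtime introspection:
def _example_dataclass_transform():
    @dataclass_transform(kw_only_default=True)
    def make_model(cls):
        return cls
    return make_model.__dataclass_transform__["kw_only_default"]  # -> True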
if hasattr(typing, "override"):
override = typing.override
else:
_F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
def override(__arg: _F) -> _F:
"""Indicate that a method is intended to override a method in a base class.
Usage:
class Base:
def method(self) -> None: ...
class Child(Base):
@override
def method(self) -> None:
super().method()
When this decorator is applied to a method, the type checker will
validate that it overrides a method with the same name on a base class.
This helps prevent bugs that may occur when a base class is changed
without an equivalent change to a child class.
See PEP 698 for details.
"""
return __arg
# We have to do some monkey patching to deal with the dual nature of
# Unpack/TypeVarTuple:
# - We want Unpack to be a kind of TypeVar so it gets accepted in
# Generic[Unpack[Ts]]
# - We want it to *not* be treated as a TypeVar for the purposes of
# counting generic parameters, so that when we subscript a generic,
# the runtime doesn't try to substitute the Unpack with the subscripted type.
if not hasattr(typing, "TypeVarTuple"):
typing._collect_type_vars = _collect_type_vars
typing._check_generic = _check_generic
# Backport typing.NamedTuple as it exists in Python 3.11.
# In 3.11, the ability to define generic `NamedTuple`s was supported.
# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
if sys.version_info >= (3, 11):
NamedTuple = typing.NamedTuple
else:
def _caller():
try:
return sys._getframe(2).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError): # For platforms without _getframe()
return None
def _make_nmtuple(name, types, module, defaults=()):
fields = [n for n, t in types]
annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
# The `_field_types` attribute was removed in 3.9;
# in earlier versions, it is the same as the `__annotations__` attribute
if sys.version_info < (3, 9):
nm_tpl._field_types = annotations
return nm_tpl
_prohibited_namedtuple_fields = typing._prohibited
_special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
class _NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
assert _NamedTuple in bases
for base in bases:
if base is not _NamedTuple and base is not typing.Generic:
raise TypeError(
'can only inherit from a NamedTuple type and Generic')
bases = tuple(tuple if base is _NamedTuple else base for base in bases)
types = ns.get('__annotations__', {})
default_names = []
for field_name in types:
if field_name in ns:
default_names.append(field_name)
elif default_names:
raise TypeError(f"Non-default namedtuple field {field_name} "
f"cannot follow default field"
f"{'s' if len(default_names) > 1 else ''} "
f"{', '.join(default_names)}")
nm_tpl = _make_nmtuple(
typename, types.items(),
defaults=[ns[n] for n in default_names],
module=ns['__module__']
)
nm_tpl.__bases__ = bases
if typing.Generic in bases:
class_getitem = typing.Generic.__class_getitem__.__func__
nm_tpl.__class_getitem__ = classmethod(class_getitem)
# update from user namespace without overriding special namedtuple attributes
for key in ns:
if key in _prohibited_namedtuple_fields:
raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
elif key not in _special_namedtuple_fields and key not in nm_tpl._fields:
setattr(nm_tpl, key, ns[key])
if typing.Generic in bases:
nm_tpl.__init_subclass__()
return nm_tpl
def NamedTuple(__typename, __fields=None, **kwargs):
if __fields is None:
__fields = kwargs.items()
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
return _make_nmtuple(__typename, __fields, module=_caller())
NamedTuple.__doc__ = typing.NamedTuple.__doc__
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
# On 3.8+, alter the signature so that it matches typing.NamedTuple.
# The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
# so just leave the signature as it is on 3.7.
if sys.version_info >= (3, 8):
NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
def _namedtuple_mro_entries(bases):
assert NamedTuple in bases
return (_NamedTuple,)
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
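# Editor's illustrative sketch (not part of the vendored module; the names
# ``_example_namedtuple`` and ``Point`` are hypothetical). The backport keeps
# the 3.11 behaviour: class syntax, field defaults, and ``_fields`` introspection:
def _example_namedtuple():
    class Point(NamedTuple):
        x: int
        y: int = 0
    p = Point(1)
    return p.x, p.y, Point._fields  # -> (1, 0, ('x', 'y'))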
| 80,078
|
Python
|
.py
| 1,774
| 34.07779
| 90
| 0.578574
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,879
|
more.pyi
|
rembo10_headphones/lib/pkg_resources/_vendor/more_itertools/more.pyi
|
"""Stubs for more_itertools.more"""
from __future__ import annotations
from types import TracebackType
from typing import (
Any,
Callable,
Container,
ContextManager,
Generic,
Hashable,
Iterable,
Iterator,
overload,
Reversible,
Sequence,
Sized,
Type,
TypeVar,
type_check_only,
)
from typing_extensions import Protocol
# Type and type variable definitions
_T = TypeVar('_T')
_T1 = TypeVar('_T1')
_T2 = TypeVar('_T2')
_U = TypeVar('_U')
_V = TypeVar('_V')
_W = TypeVar('_W')
_T_co = TypeVar('_T_co', covariant=True)
_GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[object]])
_Raisable = BaseException | Type[BaseException]
@type_check_only
class _SizedIterable(Protocol[_T_co], Sized, Iterable[_T_co]): ...
@type_check_only
class _SizedReversible(Protocol[_T_co], Sized, Reversible[_T_co]): ...
@type_check_only
class _SupportsSlicing(Protocol[_T_co]):
def __getitem__(self, __k: slice) -> _T_co: ...
def chunked(
iterable: Iterable[_T], n: int | None, strict: bool = ...
) -> Iterator[list[_T]]: ...
@overload
def first(iterable: Iterable[_T]) -> _T: ...
@overload
def first(iterable: Iterable[_T], default: _U) -> _T | _U: ...
@overload
def last(iterable: Iterable[_T]) -> _T: ...
@overload
def last(iterable: Iterable[_T], default: _U) -> _T | _U: ...
@overload
def nth_or_last(iterable: Iterable[_T], n: int) -> _T: ...
@overload
def nth_or_last(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
class peekable(Generic[_T], Iterator[_T]):
def __init__(self, iterable: Iterable[_T]) -> None: ...
def __iter__(self) -> peekable[_T]: ...
def __bool__(self) -> bool: ...
@overload
def peek(self) -> _T: ...
@overload
def peek(self, default: _U) -> _T | _U: ...
def prepend(self, *items: _T) -> None: ...
def __next__(self) -> _T: ...
@overload
def __getitem__(self, index: int) -> _T: ...
@overload
def __getitem__(self, index: slice) -> list[_T]: ...
def consumer(func: _GenFn) -> _GenFn: ...
def ilen(iterable: Iterable[object]) -> int: ...
def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ...
def with_iter(
context_manager: ContextManager[Iterable[_T]],
) -> Iterator[_T]: ...
def one(
iterable: Iterable[_T],
too_short: _Raisable | None = ...,
too_long: _Raisable | None = ...,
) -> _T: ...
def raise_(exception: _Raisable, *args: Any) -> None: ...
def strictly_n(
iterable: Iterable[_T],
n: int,
too_short: _GenFn | None = ...,
too_long: _GenFn | None = ...,
) -> list[_T]: ...
def distinct_permutations(
iterable: Iterable[_T], r: int | None = ...
) -> Iterator[tuple[_T, ...]]: ...
def intersperse(
e: _U, iterable: Iterable[_T], n: int = ...
) -> Iterator[_T | _U]: ...
def unique_to_each(*iterables: Iterable[_T]) -> list[list[_T]]: ...
@overload
def windowed(
seq: Iterable[_T], n: int, *, step: int = ...
) -> Iterator[tuple[_T | None, ...]]: ...
@overload
def windowed(
seq: Iterable[_T], n: int, fillvalue: _U, step: int = ...
) -> Iterator[tuple[_T | _U, ...]]: ...
def substrings(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
def substrings_indexes(
seq: Sequence[_T], reverse: bool = ...
) -> Iterator[tuple[Sequence[_T], int, int]]: ...
class bucket(Generic[_T, _U], Container[_U]):
def __init__(
self,
iterable: Iterable[_T],
key: Callable[[_T], _U],
validator: Callable[[object], object] | None = ...,
) -> None: ...
def __contains__(self, value: object) -> bool: ...
def __iter__(self) -> Iterator[_U]: ...
def __getitem__(self, value: object) -> Iterator[_T]: ...
def spy(
iterable: Iterable[_T], n: int = ...
) -> tuple[list[_T], Iterator[_T]]: ...
def interleave(*iterables: Iterable[_T]) -> Iterator[_T]: ...
def interleave_longest(*iterables: Iterable[_T]) -> Iterator[_T]: ...
def interleave_evenly(
iterables: list[Iterable[_T]], lengths: list[int] | None = ...
) -> Iterator[_T]: ...
def collapse(
iterable: Iterable[Any],
base_type: type | None = ...,
levels: int | None = ...,
) -> Iterator[Any]: ...
@overload
def side_effect(
func: Callable[[_T], object],
iterable: Iterable[_T],
chunk_size: None = ...,
before: Callable[[], object] | None = ...,
after: Callable[[], object] | None = ...,
) -> Iterator[_T]: ...
@overload
def side_effect(
func: Callable[[list[_T]], object],
iterable: Iterable[_T],
chunk_size: int,
before: Callable[[], object] | None = ...,
after: Callable[[], object] | None = ...,
) -> Iterator[_T]: ...
def sliced(
seq: _SupportsSlicing[_T], n: int, strict: bool = ...
) -> Iterator[_T]: ...
def split_at(
iterable: Iterable[_T],
pred: Callable[[_T], object],
maxsplit: int = ...,
keep_separator: bool = ...,
) -> Iterator[list[_T]]: ...
def split_before(
iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
) -> Iterator[list[_T]]: ...
def split_after(
iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
) -> Iterator[list[_T]]: ...
def split_when(
iterable: Iterable[_T],
pred: Callable[[_T, _T], object],
maxsplit: int = ...,
) -> Iterator[list[_T]]: ...
def split_into(
iterable: Iterable[_T], sizes: Iterable[int | None]
) -> Iterator[list[_T]]: ...
@overload
def padded(
iterable: Iterable[_T],
*,
n: int | None = ...,
next_multiple: bool = ...,
) -> Iterator[_T | None]: ...
@overload
def padded(
iterable: Iterable[_T],
fillvalue: _U,
n: int | None = ...,
next_multiple: bool = ...,
) -> Iterator[_T | _U]: ...
@overload
def repeat_last(iterable: Iterable[_T]) -> Iterator[_T]: ...
@overload
def repeat_last(iterable: Iterable[_T], default: _U) -> Iterator[_T | _U]: ...
def distribute(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
@overload
def stagger(
iterable: Iterable[_T],
offsets: _SizedIterable[int] = ...,
longest: bool = ...,
) -> Iterator[tuple[_T | None, ...]]: ...
@overload
def stagger(
iterable: Iterable[_T],
offsets: _SizedIterable[int] = ...,
longest: bool = ...,
fillvalue: _U = ...,
) -> Iterator[tuple[_T | _U, ...]]: ...
class UnequalIterablesError(ValueError):
def __init__(self, details: tuple[int, int, int] | None = ...) -> None: ...
@overload
def zip_equal(__iter1: Iterable[_T1]) -> Iterator[tuple[_T1]]: ...
@overload
def zip_equal(
__iter1: Iterable[_T1], __iter2: Iterable[_T2]
) -> Iterator[tuple[_T1, _T2]]: ...
@overload
def zip_equal(
__iter1: Iterable[_T],
__iter2: Iterable[_T],
__iter3: Iterable[_T],
*iterables: Iterable[_T],
) -> Iterator[tuple[_T, ...]]: ...
@overload
def zip_offset(
__iter1: Iterable[_T1],
*,
offsets: _SizedIterable[int],
longest: bool = ...,
fillvalue: None = None,
) -> Iterator[tuple[_T1 | None]]: ...
@overload
def zip_offset(
__iter1: Iterable[_T1],
__iter2: Iterable[_T2],
*,
offsets: _SizedIterable[int],
longest: bool = ...,
fillvalue: None = None,
) -> Iterator[tuple[_T1 | None, _T2 | None]]: ...
@overload
def zip_offset(
__iter1: Iterable[_T],
__iter2: Iterable[_T],
__iter3: Iterable[_T],
*iterables: Iterable[_T],
offsets: _SizedIterable[int],
longest: bool = ...,
fillvalue: None = None,
) -> Iterator[tuple[_T | None, ...]]: ...
@overload
def zip_offset(
__iter1: Iterable[_T1],
*,
offsets: _SizedIterable[int],
longest: bool = ...,
fillvalue: _U,
) -> Iterator[tuple[_T1 | _U]]: ...
@overload
def zip_offset(
__iter1: Iterable[_T1],
__iter2: Iterable[_T2],
*,
offsets: _SizedIterable[int],
longest: bool = ...,
fillvalue: _U,
) -> Iterator[tuple[_T1 | _U, _T2 | _U]]: ...
@overload
def zip_offset(
__iter1: Iterable[_T],
__iter2: Iterable[_T],
__iter3: Iterable[_T],
*iterables: Iterable[_T],
offsets: _SizedIterable[int],
longest: bool = ...,
fillvalue: _U,
) -> Iterator[tuple[_T | _U, ...]]: ...
def sort_together(
iterables: Iterable[Iterable[_T]],
key_list: Iterable[int] = ...,
key: Callable[..., Any] | None = ...,
reverse: bool = ...,
) -> list[tuple[_T, ...]]: ...
def unzip(iterable: Iterable[Sequence[_T]]) -> tuple[Iterator[_T], ...]: ...
def divide(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
def always_iterable(
obj: object,
base_type: type | tuple[type | tuple[Any, ...], ...] | None = ...,
) -> Iterator[Any]: ...
def adjacent(
predicate: Callable[[_T], bool],
iterable: Iterable[_T],
distance: int = ...,
) -> Iterator[tuple[bool, _T]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: None = None,
valuefunc: None = None,
reducefunc: None = None,
) -> Iterator[tuple[_T, Iterator[_T]]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: None,
reducefunc: None,
) -> Iterator[tuple[_U, Iterator[_T]]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: None,
valuefunc: Callable[[_T], _V],
reducefunc: None,
) -> Iterable[tuple[_T, Iterable[_V]]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: Callable[[_T], _V],
reducefunc: None,
) -> Iterable[tuple[_U, Iterator[_V]]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: None,
valuefunc: None,
reducefunc: Callable[[Iterator[_T]], _W],
) -> Iterable[tuple[_T, _W]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: None,
reducefunc: Callable[[Iterator[_T]], _W],
) -> Iterable[tuple[_U, _W]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: None,
valuefunc: Callable[[_T], _V],
reducefunc: Callable[[Iterable[_V]], _W],
) -> Iterable[tuple[_T, _W]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: Callable[[_T], _V],
reducefunc: Callable[[Iterable[_V]], _W],
) -> Iterable[tuple[_U, _W]]: ...
class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]):
@overload
def __init__(self, __stop: _T) -> None: ...
@overload
def __init__(self, __start: _T, __stop: _T) -> None: ...
@overload
def __init__(self, __start: _T, __stop: _T, __step: _U) -> None: ...
def __bool__(self) -> bool: ...
def __contains__(self, elem: object) -> bool: ...
def __eq__(self, other: object) -> bool: ...
@overload
def __getitem__(self, key: int) -> _T: ...
@overload
def __getitem__(self, key: slice) -> numeric_range[_T, _U]: ...
def __hash__(self) -> int: ...
def __iter__(self) -> Iterator[_T]: ...
def __len__(self) -> int: ...
def __reduce__(
self,
) -> tuple[Type[numeric_range[_T, _U]], tuple[_T, _T, _U]]: ...
def __repr__(self) -> str: ...
def __reversed__(self) -> Iterator[_T]: ...
def count(self, value: _T) -> int: ...
def index(self, value: _T) -> int: ... # type: ignore
def count_cycle(
iterable: Iterable[_T], n: int | None = ...
) -> Iterable[tuple[int, _T]]: ...
def mark_ends(
iterable: Iterable[_T],
) -> Iterable[tuple[bool, bool, _T]]: ...
def locate(
iterable: Iterable[object],
pred: Callable[..., Any] = ...,
window_size: int | None = ...,
) -> Iterator[int]: ...
def lstrip(
iterable: Iterable[_T], pred: Callable[[_T], object]
) -> Iterator[_T]: ...
def rstrip(
iterable: Iterable[_T], pred: Callable[[_T], object]
) -> Iterator[_T]: ...
def strip(
iterable: Iterable[_T], pred: Callable[[_T], object]
) -> Iterator[_T]: ...
class islice_extended(Generic[_T], Iterator[_T]):
def __init__(self, iterable: Iterable[_T], *args: int | None) -> None: ...
def __iter__(self) -> islice_extended[_T]: ...
def __next__(self) -> _T: ...
def __getitem__(self, index: slice) -> islice_extended[_T]: ...
def always_reversible(iterable: Iterable[_T]) -> Iterator[_T]: ...
def consecutive_groups(
iterable: Iterable[_T], ordering: Callable[[_T], int] = ...
) -> Iterator[Iterator[_T]]: ...
@overload
def difference(
iterable: Iterable[_T],
func: Callable[[_T, _T], _U] = ...,
*,
initial: None = ...,
) -> Iterator[_T | _U]: ...
@overload
def difference(
iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, initial: _U
) -> Iterator[_U]: ...
class SequenceView(Generic[_T], Sequence[_T]):
def __init__(self, target: Sequence[_T]) -> None: ...
@overload
def __getitem__(self, index: int) -> _T: ...
@overload
def __getitem__(self, index: slice) -> Sequence[_T]: ...
def __len__(self) -> int: ...
class seekable(Generic[_T], Iterator[_T]):
def __init__(
self, iterable: Iterable[_T], maxlen: int | None = ...
) -> None: ...
def __iter__(self) -> seekable[_T]: ...
def __next__(self) -> _T: ...
def __bool__(self) -> bool: ...
@overload
def peek(self) -> _T: ...
@overload
def peek(self, default: _U) -> _T | _U: ...
def elements(self) -> SequenceView[_T]: ...
def seek(self, index: int) -> None: ...
class run_length:
@staticmethod
def encode(iterable: Iterable[_T]) -> Iterator[tuple[_T, int]]: ...
@staticmethod
def decode(iterable: Iterable[tuple[_T, int]]) -> Iterator[_T]: ...
def exactly_n(
iterable: Iterable[_T], n: int, predicate: Callable[[_T], object] = ...
) -> bool: ...
def circular_shifts(iterable: Iterable[_T]) -> list[tuple[_T, ...]]: ...
def make_decorator(
wrapping_func: Callable[..., _U], result_index: int = ...
) -> Callable[..., Callable[[Callable[..., Any]], Callable[..., _U]]]: ...
@overload
def map_reduce(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: None = ...,
reducefunc: None = ...,
) -> dict[_U, list[_T]]: ...
@overload
def map_reduce(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: Callable[[_T], _V],
reducefunc: None = ...,
) -> dict[_U, list[_V]]: ...
@overload
def map_reduce(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: None = ...,
reducefunc: Callable[[list[_T]], _W] = ...,
) -> dict[_U, _W]: ...
@overload
def map_reduce(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: Callable[[_T], _V],
reducefunc: Callable[[list[_V]], _W],
) -> dict[_U, _W]: ...
def rlocate(
iterable: Iterable[_T],
pred: Callable[..., object] = ...,
window_size: int | None = ...,
) -> Iterator[int]: ...
def replace(
iterable: Iterable[_T],
pred: Callable[..., object],
substitutes: Iterable[_U],
count: int | None = ...,
window_size: int = ...,
) -> Iterator[_T | _U]: ...
def partitions(iterable: Iterable[_T]) -> Iterator[list[list[_T]]]: ...
def set_partitions(
iterable: Iterable[_T], k: int | None = ...
) -> Iterator[list[list[_T]]]: ...
class time_limited(Generic[_T], Iterator[_T]):
def __init__(
self, limit_seconds: float, iterable: Iterable[_T]
) -> None: ...
def __iter__(self) -> islice_extended[_T]: ...
def __next__(self) -> _T: ...
@overload
def only(
iterable: Iterable[_T], *, too_long: _Raisable | None = ...
) -> _T | None: ...
@overload
def only(
iterable: Iterable[_T], default: _U, too_long: _Raisable | None = ...
) -> _T | _U: ...
def ichunked(iterable: Iterable[_T], n: int) -> Iterator[Iterator[_T]]: ...
def distinct_combinations(
iterable: Iterable[_T], r: int
) -> Iterator[tuple[_T, ...]]: ...
def filter_except(
validator: Callable[[Any], object],
iterable: Iterable[_T],
*exceptions: Type[BaseException],
) -> Iterator[_T]: ...
def map_except(
function: Callable[[Any], _U],
iterable: Iterable[_T],
*exceptions: Type[BaseException],
) -> Iterator[_U]: ...
def map_if(
iterable: Iterable[Any],
pred: Callable[[Any], bool],
func: Callable[[Any], Any],
func_else: Callable[[Any], Any] | None = ...,
) -> Iterator[Any]: ...
def sample(
iterable: Iterable[_T],
k: int,
weights: Iterable[float] | None = ...,
) -> list[_T]: ...
def is_sorted(
iterable: Iterable[_T],
key: Callable[[_T], _U] | None = ...,
reverse: bool = False,
strict: bool = False,
) -> bool: ...
class AbortThread(BaseException):
pass
class callback_iter(Generic[_T], Iterator[_T]):
def __init__(
self,
func: Callable[..., Any],
callback_kwd: str = ...,
wait_seconds: float = ...,
) -> None: ...
def __enter__(self) -> callback_iter[_T]: ...
def __exit__(
self,
exc_type: Type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> bool | None: ...
def __iter__(self) -> callback_iter[_T]: ...
def __next__(self) -> _T: ...
def _reader(self) -> Iterator[_T]: ...
@property
def done(self) -> bool: ...
@property
def result(self) -> Any: ...
def windowed_complete(
iterable: Iterable[_T], n: int
) -> Iterator[tuple[_T, ...]]: ...
def all_unique(
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
) -> bool: ...
def nth_product(index: int, *args: Iterable[_T]) -> tuple[_T, ...]: ...
def nth_permutation(
iterable: Iterable[_T], r: int, index: int
) -> tuple[_T, ...]: ...
def value_chain(*args: _T | Iterable[_T]) -> Iterable[_T]: ...
def product_index(element: Iterable[_T], *args: Iterable[_T]) -> int: ...
def combination_index(
element: Iterable[_T], iterable: Iterable[_T]
) -> int: ...
def permutation_index(
element: Iterable[_T], iterable: Iterable[_T]
) -> int: ...
def repeat_each(iterable: Iterable[_T], n: int = ...) -> Iterator[_T]: ...
class countable(Generic[_T], Iterator[_T]):
def __init__(self, iterable: Iterable[_T]) -> None: ...
def __iter__(self) -> countable[_T]: ...
def __next__(self) -> _T: ...
def chunked_even(iterable: Iterable[_T], n: int) -> Iterator[list[_T]]: ...
def zip_broadcast(
*objects: _T | Iterable[_T],
scalar_types: type | tuple[type | tuple[Any, ...], ...] | None = ...,
strict: bool = ...,
) -> Iterable[tuple[_T, ...]]: ...
def unique_in_window(
iterable: Iterable[_T], n: int, key: Callable[[_T], _U] | None = ...
) -> Iterator[_T]: ...
def duplicates_everseen(
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
) -> Iterator[_T]: ...
def duplicates_justseen(
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
) -> Iterator[_T]: ...
class _SupportsLessThan(Protocol):
def __lt__(self, __other: Any) -> bool: ...
_SupportsLessThanT = TypeVar("_SupportsLessThanT", bound=_SupportsLessThan)
@overload
def minmax(
iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None
) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
@overload
def minmax(
iterable_or_value: Iterable[_T], *, key: Callable[[_T], _SupportsLessThan]
) -> tuple[_T, _T]: ...
@overload
def minmax(
iterable_or_value: Iterable[_SupportsLessThanT],
*,
key: None = None,
default: _U,
) -> _U | tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
@overload
def minmax(
iterable_or_value: Iterable[_T],
*,
key: Callable[[_T], _SupportsLessThan],
default: _U,
) -> _U | tuple[_T, _T]: ...
@overload
def minmax(
iterable_or_value: _SupportsLessThanT,
__other: _SupportsLessThanT,
*others: _SupportsLessThanT,
) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
@overload
def minmax(
iterable_or_value: _T,
__other: _T,
*others: _T,
key: Callable[[_T], _SupportsLessThan],
) -> tuple[_T, _T]: ...
def longest_common_prefix(
iterables: Iterable[Iterable[_T]],
) -> Iterator[_T]: ...
def iequals(*iterables: Iterable[object]) -> bool: ...
def constrained_batches(
iterable: Iterable[object],
max_size: int,
max_count: int | None = ...,
get_len: Callable[[_T], object] = ...,
strict: bool = ...,
) -> Iterator[tuple[_T]]: ...
def gray_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
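# Sketch of how a pair of the overloads above resolves: providing a default
# changes the inferred return type (the inferences shown are illustrative):
#     first([1, 2, 3])            # checker infers: int
#     first([1, 2, 3], 'none')    # checker infers: int | str
#     peekable('abc').peek(None)  # checker infers: str | None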
| 20,105 | Python | .py | 636 | 28.268868 | 79 | 0.582386 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,880 | more.py | rembo10_headphones/lib/pkg_resources/_vendor/more_itertools/more.py |
import warnings
from collections import Counter, defaultdict, deque, abc
from collections.abc import Sequence
from functools import partial, reduce, wraps
from heapq import heapify, heapreplace, heappop
from itertools import (
chain,
compress,
count,
cycle,
dropwhile,
groupby,
islice,
repeat,
starmap,
takewhile,
tee,
zip_longest,
)
from math import exp, factorial, floor, log
from queue import Empty, Queue
from random import random, randrange, uniform
from operator import itemgetter, mul, sub, gt, lt, ge, le
from sys import hexversion, maxsize
from time import monotonic
from .recipes import (
_marker,
_zip_equal,
UnequalIterablesError,
consume,
flatten,
pairwise,
powerset,
take,
unique_everseen,
all_equal,
)
__all__ = [
'AbortThread',
'SequenceView',
'UnequalIterablesError',
'adjacent',
'all_unique',
'always_iterable',
'always_reversible',
'bucket',
'callback_iter',
'chunked',
'chunked_even',
'circular_shifts',
'collapse',
'combination_index',
'consecutive_groups',
'constrained_batches',
'consumer',
'count_cycle',
'countable',
'difference',
'distinct_combinations',
'distinct_permutations',
'distribute',
'divide',
'duplicates_everseen',
'duplicates_justseen',
'exactly_n',
'filter_except',
'first',
'gray_product',
'groupby_transform',
'ichunked',
'iequals',
'ilen',
'interleave',
'interleave_evenly',
'interleave_longest',
'intersperse',
'is_sorted',
'islice_extended',
'iterate',
'last',
'locate',
'longest_common_prefix',
'lstrip',
'make_decorator',
'map_except',
'map_if',
'map_reduce',
'mark_ends',
'minmax',
'nth_or_last',
'nth_permutation',
'nth_product',
'numeric_range',
'one',
'only',
'padded',
'partitions',
'peekable',
'permutation_index',
'product_index',
'raise_',
'repeat_each',
'repeat_last',
'replace',
'rlocate',
'rstrip',
'run_length',
'sample',
'seekable',
'set_partitions',
'side_effect',
'sliced',
'sort_together',
'split_after',
'split_at',
'split_before',
'split_into',
'split_when',
'spy',
'stagger',
'strip',
'strictly_n',
'substrings',
'substrings_indexes',
'time_limited',
'unique_in_window',
'unique_to_each',
'unzip',
'value_chain',
'windowed',
'windowed_complete',
'with_iter',
'zip_broadcast',
'zip_equal',
'zip_offset',
]
def chunked(iterable, n, strict=False):
"""Break *iterable* into lists of length *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6], 3))
[[1, 2, 3], [4, 5, 6]]
By default, the last yielded list will have fewer than *n* elements
if the length of *iterable* is not divisible by *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
[[1, 2, 3], [4, 5, 6], [7, 8]]
To use a fill-in value instead, see the :func:`grouper` recipe.
If the length of *iterable* is not divisible by *n* and *strict* is
``True``, then ``ValueError`` will be raised before the last
list is yielded.
"""
iterator = iter(partial(take, n, iter(iterable)), [])
if strict:
if n is None:
raise ValueError('n must not be None when using strict mode.')
def ret():
for chunk in iterator:
if len(chunk) != n:
raise ValueError('iterable is not divisible by n.')
yield chunk
return iter(ret())
else:
return iterator
def first(iterable, default=_marker):
"""Return the first item of *iterable*, or *default* if *iterable* is
empty.
>>> first([0, 1, 2, 3])
0
>>> first([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
:func:`first` is useful when you have a generator of expensive-to-retrieve
values and want any arbitrary one. It is marginally shorter than
``next(iter(iterable), default)``.
"""
try:
return next(iter(iterable))
except StopIteration as e:
if default is _marker:
raise ValueError(
'first() was called on an empty iterable, and no '
'default value was provided.'
) from e
return default
def last(iterable, default=_marker):
"""Return the last item of *iterable*, or *default* if *iterable* is
empty.
>>> last([0, 1, 2, 3])
3
>>> last([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
"""
try:
if isinstance(iterable, Sequence):
return iterable[-1]
# Work around https://bugs.python.org/issue38525: the reversed() path
# is skipped on CPython 3.8.0 exactly (hexversion 0x030800F0)
elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
return next(reversed(iterable))
else:
return deque(iterable, maxlen=1)[-1]
except (IndexError, TypeError, StopIteration):
if default is _marker:
raise ValueError(
'last() was called on an empty iterable, and no default was '
'provided.'
)
return default
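# Sketch of the deque fallback above: deque(iterable, maxlen=1) consumes the
# whole iterable in C while retaining only the most recent item, so the
# one-shot-iterator path runs in O(n) time but O(1) space:
#     deque(iter([0, 1, 2, 3]), maxlen=1)[-1]  # -> 3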
def nth_or_last(iterable, n, default=_marker):
"""Return the nth or the last item of *iterable*,
or *default* if *iterable* is empty.
>>> nth_or_last([0, 1, 2, 3], 2)
2
>>> nth_or_last([0, 1], 2)
1
>>> nth_or_last([], 0, 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
"""
return last(islice(iterable, n + 1), default=default)
class peekable:
"""Wrap an iterator to allow lookahead and prepending elements.
Call :meth:`peek` on the result to get the value that will be returned
by :func:`next`. This won't advance the iterator:
>>> p = peekable(['a', 'b'])
>>> p.peek()
'a'
>>> next(p)
'a'
Pass :meth:`peek` a default value to return that instead of raising
``StopIteration`` when the iterator is exhausted.
>>> p = peekable([])
>>> p.peek('hi')
'hi'
peekables also offer a :meth:`prepend` method, which "inserts" items
at the head of the iterable:
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> p.peek()
11
>>> list(p)
[11, 12, 1, 2, 3]
peekables can be indexed. Index 0 is the item that will be returned by
:func:`next`, index 1 is the item after that, and so on:
>>> p = peekable(['a', 'b', 'c', 'd'])
>>> p[0]
'a'
>>> p[1]
'b'
>>> next(p)
'a'
The values up to the given index will be cached.
Negative indexes are supported, but be aware that they will cache the
remaining items in the source iterator, which may require significant
storage.
To check whether a peekable is exhausted, check its truth value:
>>> p = peekable(['a', 'b'])
>>> if p: # peekable has items
... list(p)
['a', 'b']
>>> if not p: # peekable is exhausted
... list(p)
[]
"""
def __init__(self, iterable):
self._it = iter(iterable)
self._cache = deque()
def __iter__(self):
return self
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def peek(self, default=_marker):
"""Return the item that will be next returned from ``next()``.
Return ``default`` if there are no items left. If ``default`` is not
provided, raise ``StopIteration``.
"""
if not self._cache:
try:
self._cache.append(next(self._it))
except StopIteration:
if default is _marker:
raise
return default
return self._cache[0]
def prepend(self, *items):
"""Stack up items to be the next ones returned from ``next()`` or
``self.peek()``. The items will be returned in
first in, first out order::
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> list(p)
[11, 12, 1, 2, 3]
It is possible, by prepending items, to "resurrect" a peekable that
previously raised ``StopIteration``.
>>> p = peekable([])
>>> next(p)
Traceback (most recent call last):
...
StopIteration
>>> p.prepend(1)
>>> next(p)
1
>>> next(p)
Traceback (most recent call last):
...
StopIteration
"""
self._cache.extendleft(reversed(items))
def __next__(self):
if self._cache:
return self._cache.popleft()
return next(self._it)
def _get_slice(self, index):
# Normalize the slice's arguments
step = 1 if (index.step is None) else index.step
if step > 0:
start = 0 if (index.start is None) else index.start
stop = maxsize if (index.stop is None) else index.stop
elif step < 0:
start = -1 if (index.start is None) else index.start
stop = (-maxsize - 1) if (index.stop is None) else index.stop
else:
raise ValueError('slice step cannot be zero')
# If either the start or stop index is negative, we'll need to cache
# the rest of the iterable in order to slice from the right side.
if (start < 0) or (stop < 0):
self._cache.extend(self._it)
# Otherwise we'll need to find the rightmost index and cache to that
# point.
else:
n = min(max(start, stop) + 1, maxsize)
cache_len = len(self._cache)
if n >= cache_len:
self._cache.extend(islice(self._it, n - cache_len))
return list(self._cache)[index]
def __getitem__(self, index):
if isinstance(index, slice):
return self._get_slice(index)
cache_len = len(self._cache)
if index < 0:
self._cache.extend(self._it)
elif index >= cache_len:
self._cache.extend(islice(self._it, index + 1 - cache_len))
return self._cache[index]
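# Sketch of peekable's lazy caching above (values are illustrative):
#     p = peekable(iter('abcde'))
#     p[1:3]   # caches 'a'..'d' (through index max(start, stop)); -> ['b', 'c']
#     p[-1]    # negative index caches the remainder ('e'); -> 'e'
#     next(p)  # the cache drains before the source iterator; -> 'a'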
def consumer(func):
"""Decorator that automatically advances a PEP-342-style "reverse iterator"
to its first yield point so you don't have to call ``next()`` on it
manually.
>>> @consumer
... def tally():
... i = 0
... while True:
... print('Thing number %s is %s.' % (i, (yield)))
... i += 1
...
>>> t = tally()
>>> t.send('red')
Thing number 0 is red.
>>> t.send('fish')
Thing number 1 is fish.
Without the decorator, you would have to call ``next(t)`` before
``t.send()`` could be used.
"""
@wraps(func)
def wrapper(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
return gen
return wrapper
def ilen(iterable):
"""Return the number of items in *iterable*.
>>> ilen(x for x in range(1000000) if x % 3 == 0)
333334
This consumes the iterable, so handle with care.
"""
# This approach was selected because benchmarks showed it's likely the
# fastest of the known implementations at the time of writing.
# See GitHub tracker: #236, #230.
counter = count()
deque(zip(iterable, counter), maxlen=0)
return next(counter)
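# Illustrative reference version of the trick above; ilen() itself keeps the
# counting loop out of Python bytecode by letting deque() drive the zip.
# The name below is ours, not part of the public API.
def _ilen_naive(iterable):
    """Count items with a plain loop.

    >>> _ilen_naive(x for x in range(1000000) if x % 3 == 0)
    333334
    """
    total = 0
    for _ in iterable:
        total += 1
    return total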
def iterate(func, start):
"""Return ``start``, ``func(start)``, ``func(func(start))``, ...
>>> from itertools import islice
>>> list(islice(iterate(lambda x: 2*x, 1), 10))
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
"""
while True:
yield start
start = func(start)
def with_iter(context_manager):
"""Wrap an iterable in a ``with`` statement, so it closes once exhausted.
For example, this will close the file when the iterator is exhausted::
upper_lines = (line.upper() for line in with_iter(open('foo')))
Any context manager which returns an iterable is a candidate for
``with_iter``.
"""
with context_manager as iterable:
yield from iterable
def one(iterable, too_short=None, too_long=None):
"""Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item.
:func:`one` is useful for ensuring that an iterable contains only one item.
For example, it can be used to retrieve the result of a database query
that is expected to return a single row.
If *iterable* is empty, ``ValueError`` will be raised. You may specify a
different exception with the *too_short* keyword:
>>> it = []
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too few items in iterable (expected 1)
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: too few items
Similarly, if *iterable* contains more than one item, ``ValueError`` will
be raised. You may specify a different exception with the *too_long*
keyword:
>>> it = ['too', 'many']
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 'too',
'many', and perhaps more.
>>> too_long = RuntimeError
>>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
Note that :func:`one` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check iterable
contents less destructively.
"""
it = iter(iterable)
try:
first_value = next(it)
except StopIteration as e:
raise (
too_short or ValueError('too few items in iterable (expected 1)')
) from e
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
def raise_(exception, *args):
raise exception(*args)
def strictly_n(iterable, n, too_short=None, too_long=None):
"""Validate that *iterable* has exactly *n* items and return them if
it does. If it has fewer than *n* items, call function *too_short*
with those items. If it has more than *n* items, call function
*too_long* with the first ``n + 1`` items.
>>> iterable = ['a', 'b', 'c', 'd']
>>> n = 4
>>> list(strictly_n(iterable, n))
['a', 'b', 'c', 'd']
By default, *too_short* and *too_long* are functions that raise
``ValueError``.
>>> list(strictly_n('ab', 3)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too few items in iterable (got 2)
>>> list(strictly_n('abc', 2)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too many items in iterable (got at least 3)
You can instead supply functions that do something else.
*too_short* will be called with the number of items in *iterable*.
*too_long* will be called with `n + 1`.
>>> def too_short(item_count):
... raise RuntimeError
>>> it = strictly_n('abcd', 6, too_short=too_short)
>>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
>>> def too_long(item_count):
... print('The boss is going to hear about this')
>>> it = strictly_n('abcdef', 4, too_long=too_long)
>>> list(it)
The boss is going to hear about this
['a', 'b', 'c', 'd']
"""
if too_short is None:
too_short = lambda item_count: raise_(
ValueError,
'Too few items in iterable (got {})'.format(item_count),
)
if too_long is None:
too_long = lambda item_count: raise_(
ValueError,
'Too many items in iterable (got at least {})'.format(item_count),
)
it = iter(iterable)
for i in range(n):
try:
item = next(it)
except StopIteration:
too_short(i)
return
else:
yield item
try:
next(it)
except StopIteration:
pass
else:
too_long(n + 1)
def distinct_permutations(iterable, r=None):
"""Yield successive distinct permutations of the elements in *iterable*.
>>> sorted(distinct_permutations([1, 0, 1]))
[(0, 1, 1), (1, 0, 1), (1, 1, 0)]
Equivalent to ``set(permutations(iterable))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
Duplicate permutations arise when there are duplicated elements in the
input iterable. The number of items returned is
`n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
items input, and each `x_i` is the count of a distinct item in the input
sequence.
If *r* is given, only the *r*-length permutations are yielded.
>>> sorted(distinct_permutations([1, 0, 1], r=2))
[(0, 1), (1, 0), (1, 1)]
>>> sorted(distinct_permutations(range(3), r=2))
[(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
"""
# Algorithm: https://w.wiki/Qai
def _full(A):
while True:
# Yield the permutation we have
yield tuple(A)
# Find the largest index i such that A[i] < A[i + 1]
for i in range(size - 2, -1, -1):
if A[i] < A[i + 1]:
break
# If no such index exists, this permutation is the last one
else:
return
# Find the largest index j greater than i such that A[i] < A[j]
for j in range(size - 1, i, -1):
if A[i] < A[j]:
break
# Swap the value of A[i] with that of A[j], then reverse the
# sequence from A[i + 1] to form the new permutation
A[i], A[j] = A[j], A[i]
A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1]
# Algorithm: modified from the above
def _partial(A, r):
# Split A into the first r items and the last r items
head, tail = A[:r], A[r:]
right_head_indexes = range(r - 1, -1, -1)
left_tail_indexes = range(len(tail))
while True:
# Yield the permutation we have
yield tuple(head)
# Starting from the right, find the first index of the head with
# value smaller than the maximum value of the tail - call it i.
pivot = tail[-1]
for i in right_head_indexes:
if head[i] < pivot:
break
pivot = head[i]
else:
return
# Starting from the left, find the first value of the tail
# with a value greater than head[i] and swap.
for j in left_tail_indexes:
if tail[j] > head[i]:
head[i], tail[j] = tail[j], head[i]
break
# If we didn't find one, start from the right and find the first
# index of the head with a value greater than head[i] and swap.
else:
for j in right_head_indexes:
if head[j] > head[i]:
head[i], head[j] = head[j], head[i]
break
# Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
tail += head[: i - r : -1] # head[i + 1:][::-1]
i += 1
head[i:], tail[:] = tail[: r - i], tail[r - i :]
items = sorted(iterable)
size = len(items)
if r is None:
r = size
if 0 < r <= size:
return _full(items) if (r == size) else _partial(items, r)
return iter(() if r else ((),))
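# Worked trace of _full() above for items == [0, 1, 1] (a sketch):
#   yield (0, 1, 1); largest i with A[i] < A[i + 1] is 0; largest j > i
#   with A[i] < A[j] is 2; swap -> [1, 1, 0]; reverse A[1:] -> [1, 0, 1]
#   yield (1, 0, 1); i = 1, j = 2; swap -> [1, 1, 0]; A[2:] reversal is a no-op
#   yield (1, 1, 0); no i has A[i] < A[i + 1], so the generator returns.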
def intersperse(e, iterable, n=1):
"""Intersperse filler element *e* among the items in *iterable*, leaving
*n* items between each filler element.
>>> list(intersperse('!', [1, 2, 3, 4, 5]))
[1, '!', 2, '!', 3, '!', 4, '!', 5]
>>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
[1, 2, None, 3, 4, None, 5]
"""
if n == 0:
raise ValueError('n must be > 0')
elif n == 1:
# interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
# islice(..., 1, None) -> x_0, e, x_1, e, x_2...
return islice(interleave(repeat(e), iterable), 1, None)
else:
# interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
# islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
# flatten(...) -> x_0, x_1, e, x_2, x_3...
filler = repeat([e])
chunks = chunked(iterable, n)
return flatten(islice(interleave(filler, chunks), 1, None))
def unique_to_each(*iterables):
"""Return the elements from each of the input iterables that aren't in the
other input iterables.
For example, suppose you have a set of packages, each with a set of
dependencies::
{'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
If you remove one package, which dependencies can also be removed?
If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
``pkg_2``, and ``D`` is only needed for ``pkg_3``::
>>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
[['A'], ['C'], ['D']]
If there are duplicates in one input iterable that aren't in the others,
they will be duplicated in the output. Input order is preserved::
>>> unique_to_each("mississippi", "missouri")
[['p', 'p'], ['o', 'u', 'r']]
It is assumed that the elements of each iterable are hashable.
"""
pool = [list(it) for it in iterables]
counts = Counter(chain.from_iterable(map(set, pool)))
uniques = {element for element in counts if counts[element] == 1}
return [list(filter(uniques.__contains__, it)) for it in pool]
def windowed(seq, n, fillvalue=None, step=1):
"""Return a sliding window of width *n* over the given iterable.
>>> all_windows = windowed([1, 2, 3, 4, 5], 3)
>>> list(all_windows)
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
When the window is larger than the iterable, *fillvalue* is used in place
of missing values:
>>> list(windowed([1, 2, 3], 4))
[(1, 2, 3, None)]
Each window will advance in increments of *step*:
>>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
[(1, 2, 3), (3, 4, 5), (5, 6, '!')]
To slide into the iterable's items, use :func:`chain` to add filler items
to the left:
>>> iterable = [1, 2, 3, 4]
>>> n = 3
>>> padding = [None] * (n - 1)
>>> list(windowed(chain(padding, iterable), 3))
[(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
"""
if n < 0:
raise ValueError('n must be >= 0')
if n == 0:
yield tuple()
return
if step < 1:
raise ValueError('step must be >= 1')
window = deque(maxlen=n)
i = n
for _ in map(window.append, seq):
i -= 1
if not i:
i = step
yield tuple(window)
size = len(window)
if size == 0:
return
elif size < n:
yield tuple(chain(window, repeat(fillvalue, n - size)))
elif 0 < i < min(step, n):
window += (fillvalue,) * i
yield tuple(window)
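# Trace of the countdown above for windowed([1, 2, 3, 4, 5, 6], 3,
# fillvalue='!', step=2), matching the docstring example (a sketch):
#   i starts at n=3 and resets to step=2 on every emit, so windows fire
#   after items 3 and 5: (1, 2, 3) and (3, 4, 5). At the end, i == 1 and
#   0 < 1 < min(step, n) == 2, so one fillvalue pads the tail: (5, 6, '!').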
def substrings(iterable):
"""Yield all of the substrings of *iterable*.
>>> [''.join(s) for s in substrings('more')]
['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
Note that non-string iterables can also be subdivided.
>>> list(substrings([0, 1, 2]))
[(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
"""
# The length-1 substrings
seq = []
for item in iter(iterable):
seq.append(item)
yield (item,)
seq = tuple(seq)
item_count = len(seq)
# And the rest
for n in range(2, item_count + 1):
for i in range(item_count - n + 1):
yield seq[i : i + n]
def substrings_indexes(seq, reverse=False):
"""Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order.
"""
r = range(1, len(seq) + 1)
if reverse:
r = reversed(r)
return (
(seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
)
class bucket:
"""Wrap *iterable* and return an object that buckets it iterable into
child iterables based on a *key* function.
>>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
>>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character
>>> sorted(list(s)) # Get the keys
['a', 'b', 'c']
>>> a_iterable = s['a']
>>> next(a_iterable)
'a1'
>>> next(a_iterable)
'a2'
>>> list(s['b'])
['b1', 'b2', 'b3']
The original iterable will be advanced and its items will be cached until
they are used by the child iterables. This may require significant storage.
By default, attempting to select a bucket to which no items belong will
exhaust the iterable and cache all values.
If you specify a *validator* function, selected buckets will instead be
checked against it.
>>> from itertools import count
>>> it = count(1, 2) # Infinite sequence of odd numbers
>>> key = lambda x: x % 10 # Bucket by last digit
>>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
>>> s = bucket(it, key=key, validator=validator)
>>> 2 in s
False
>>> list(s[2])
[]
"""
def __init__(self, iterable, key, validator=None):
self._it = iter(iterable)
self._key = key
self._cache = defaultdict(deque)
self._validator = validator or (lambda x: True)
def __contains__(self, value):
if not self._validator(value):
return False
try:
item = next(self[value])
except StopIteration:
return False
else:
self._cache[value].appendleft(item)
return True
def _get_values(self, value):
"""
Helper to yield items from the parent iterator that match *value*.
Items that don't match are stored in the local cache as they
are encountered.
"""
while True:
# If we've cached some items that match the target value, emit
# the first one and evict it from the cache.
if self._cache[value]:
yield self._cache[value].popleft()
# Otherwise we need to advance the parent iterator to search for
# a matching item, caching the rest.
else:
while True:
try:
item = next(self._it)
except StopIteration:
return
item_value = self._key(item)
if item_value == value:
yield item
break
elif self._validator(item_value):
self._cache[item_value].append(item)
def __iter__(self):
for item in self._it:
item_value = self._key(item)
if self._validator(item_value):
self._cache[item_value].append(item)
yield from self._cache.keys()
def __getitem__(self, value):
if not self._validator(value):
return iter(())
return self._get_values(value)
def spy(iterable, n=1):
"""Return a 2-tuple with a list containing the first *n* elements of
*iterable*, and an iterator with the same items as *iterable*.
This allows you to "look ahead" at the items in the iterable without
advancing it.
There is one item in the list by default:
>>> iterable = 'abcdefg'
>>> head, iterable = spy(iterable)
>>> head
['a']
>>> list(iterable)
['a', 'b', 'c', 'd', 'e', 'f', 'g']
You may use unpacking to retrieve items instead of lists:
>>> (head,), iterable = spy('abcdefg')
>>> head
'a'
>>> (first, second), iterable = spy('abcdefg', 2)
>>> first
'a'
>>> second
'b'
The number of items requested can be larger than the number of items in
the iterable:
>>> iterable = [1, 2, 3, 4, 5]
>>> head, iterable = spy(iterable, 10)
>>> head
[1, 2, 3, 4, 5]
>>> list(iterable)
[1, 2, 3, 4, 5]
"""
it = iter(iterable)
head = take(n, it)
return head.copy(), chain(head, it)
def interleave(*iterables):
"""Return a new iterable yielding from each iterable in turn,
until the shortest is exhausted.
>>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7]
For a version that doesn't terminate after the shortest iterable is
exhausted, see :func:`interleave_longest`.
"""
return chain.from_iterable(zip(*iterables))
def interleave_longest(*iterables):
"""Return a new iterable yielding from each iterable in turn,
skipping any that are exhausted.
>>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7, 3, 8]
This function produces the same output as :func:`roundrobin`, but may
perform better for some inputs (in particular when the number of iterables
is large).
"""
i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
return (x for x in i if x is not _marker)
def interleave_evenly(iterables, lengths=None):
"""
Interleave multiple iterables so that their elements are evenly distributed
throughout the output sequence.
>>> iterables = [1, 2, 3, 4, 5], ['a', 'b']
>>> list(interleave_evenly(iterables))
[1, 2, 'a', 3, 4, 'b', 5]
>>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]]
>>> list(interleave_evenly(iterables))
[1, 6, 4, 2, 7, 3, 8, 5]
This function requires iterables of known length. Iterables without
``__len__()`` can be used by manually specifying lengths with *lengths*:
>>> from itertools import combinations, repeat
>>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']]
>>> lengths = [4 * (4 - 1) // 2, 3]
>>> list(interleave_evenly(iterables, lengths=lengths))
[(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c']
Based on Bresenham's algorithm.
"""
if lengths is None:
try:
lengths = [len(it) for it in iterables]
except TypeError:
raise ValueError(
'Iterable lengths could not be determined automatically. '
'Specify them with the lengths keyword.'
)
elif len(iterables) != len(lengths):
raise ValueError('Mismatching number of iterables and lengths.')
dims = len(lengths)
# sort iterables by length, descending
lengths_permute = sorted(
range(dims), key=lambda i: lengths[i], reverse=True
)
lengths_desc = [lengths[i] for i in lengths_permute]
iters_desc = [iter(iterables[i]) for i in lengths_permute]
# the longest iterable is the primary one (Bresenham: the longest
# distance along an axis)
delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:]
iter_primary, iters_secondary = iters_desc[0], iters_desc[1:]
errors = [delta_primary // dims] * len(deltas_secondary)
to_yield = sum(lengths)
while to_yield:
yield next(iter_primary)
to_yield -= 1
# update errors for each secondary iterable
errors = [e - delta for e, delta in zip(errors, deltas_secondary)]
# those iterables for which the error is negative are yielded
# ("diagonal step" in Bresenham)
for i, e in enumerate(errors):
if e < 0:
yield next(iters_secondary[i])
to_yield -= 1
errors[i] += delta_primary
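# Worked Bresenham trace for lengths (5, 2), i.e. [1, 2, 3, 4, 5] with
# ['a', 'b'] (a sketch): errors starts at [5 // 2] == [2], delta == 2.
#   yield 1 -> errors [0]; yield 2 -> errors [-2] < 0 -> yield 'a' -> [3]
#   yield 3 -> errors [1]; yield 4 -> errors [-1] < 0 -> yield 'b' -> [4]
#   yield 5 -> to_yield exhausted: [1, 2, 'a', 3, 4, 'b', 5]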
def collapse(iterable, base_type=None, levels=None):
"""Flatten an iterable with multiple levels of nesting (e.g., a list of
lists of tuples) into non-iterable types.
>>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
>>> list(collapse(iterable))
[1, 2, 3, 4, 5, 6]
Binary and text strings are not considered iterable and
will not be collapsed.
To avoid collapsing other types, specify *base_type*:
>>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
>>> list(collapse(iterable, base_type=tuple))
['ab', ('cd', 'ef'), 'gh', 'ij']
Specify *levels* to stop flattening after a certain level:
>>> iterable = [('a', ['b']), ('c', ['d'])]
>>> list(collapse(iterable)) # Fully flattened
['a', 'b', 'c', 'd']
>>> list(collapse(iterable, levels=1)) # Only one level flattened
['a', ['b'], 'c', ['d']]
"""
def walk(node, level):
if (
((levels is not None) and (level > levels))
or isinstance(node, (str, bytes))
or ((base_type is not None) and isinstance(node, base_type))
):
yield node
return
try:
tree = iter(node)
except TypeError:
yield node
return
else:
for child in tree:
yield from walk(child, level + 1)
yield from walk(iterable, 0)
def side_effect(func, iterable, chunk_size=None, before=None, after=None):
"""Invoke *func* on each item in *iterable* (or on each *chunk_size* group
of items) before yielding the item.
`func` must be a function that takes a single argument. Its return value
will be discarded.
*before* and *after* are optional functions that take no arguments. They
will be executed before iteration starts and after it ends, respectively.
`side_effect` can be used for logging, updating progress bars, or anything
that is not functionally "pure."
Emitting a status message:
>>> from more_itertools import consume
>>> func = lambda item: print('Received {}'.format(item))
>>> consume(side_effect(func, range(2)))
Received 0
Received 1
Operating on chunks of items:
>>> pair_sums = []
>>> func = lambda chunk: pair_sums.append(sum(chunk))
>>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
[0, 1, 2, 3, 4, 5]
>>> list(pair_sums)
[1, 5, 9]
Writing to a file-like object:
>>> from io import StringIO
>>> from more_itertools import consume
>>> f = StringIO()
>>> func = lambda x: print(x, file=f)
>>> before = lambda: print(u'HEADER', file=f)
>>> after = f.close
>>> it = [u'a', u'b', u'c']
>>> consume(side_effect(func, it, before=before, after=after))
>>> f.closed
True
"""
try:
if before is not None:
before()
if chunk_size is None:
for item in iterable:
func(item)
yield item
else:
for chunk in chunked(iterable, chunk_size):
func(chunk)
yield from chunk
finally:
if after is not None:
after()
def sliced(seq, n, strict=False):
"""Yield slices of length *n* from the sequence *seq*.
>>> list(sliced((1, 2, 3, 4, 5, 6), 3))
[(1, 2, 3), (4, 5, 6)]
By default, the last yielded slice will have fewer than *n* elements
if the length of *seq* is not divisible by *n*:
>>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
[(1, 2, 3), (4, 5, 6), (7, 8)]
If the length of *seq* is not divisible by *n* and *strict* is
``True``, then ``ValueError`` will be raised before the last
slice is yielded.
This function will only work for iterables that support slicing.
For non-sliceable iterables, see :func:`chunked`.
"""
iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
if strict:
def ret():
for _slice in iterator:
if len(_slice) != n:
raise ValueError("seq is not divisible by n.")
yield _slice
return iter(ret())
else:
return iterator
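# Sketch of the takewhile trick above: count(0, n) produces slice starts
# 0, n, 2n, ...; once a start runs past the end of seq, seq[i : i + n] is
# empty, its len() is falsy, and takewhile stops. For an 8-item seq with
# n=3 the starts are 0, 3, 6, 9, and the empty seq[9:12] ends the stream.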
def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
"""Yield lists of items from *iterable*, where each list is delimited by
an item for which callable *pred* returns ``True``.
>>> list(split_at('abcdcba', lambda x: x == 'b'))
[['a'], ['c', 'd', 'c'], ['a']]
>>> list(split_at(range(10), lambda n: n % 2 == 1))
[[0], [2], [4], [6], [8], []]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
[[0], [2], [4, 5, 6, 7, 8, 9]]
By default, the delimiting items are not included in the output.
To include them, set *keep_separator* to ``True``.
>>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
[['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
if pred(item):
yield buf
if keep_separator:
yield [item]
if maxsplit == 1:
yield list(it)
return
buf = []
maxsplit -= 1
else:
buf.append(item)
yield buf
def split_before(iterable, pred, maxsplit=-1):
"""Yield lists of items from *iterable*, where each list ends just before
an item for which callable *pred* returns ``True``:
>>> list(split_before('OneTwo', lambda s: s.isupper()))
[['O', 'n', 'e'], ['T', 'w', 'o']]
>>> list(split_before(range(10), lambda n: n % 3 == 0))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
[[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
if pred(item) and buf:
yield buf
if maxsplit == 1:
yield [item] + list(it)
return
buf = []
maxsplit -= 1
buf.append(item)
if buf:
yield buf
def split_after(iterable, pred, maxsplit=-1):
"""Yield lists of items from *iterable*, where each list ends with an
item where callable *pred* returns ``True``:
>>> list(split_after('one1two2', lambda s: s.isdigit()))
[['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
>>> list(split_after(range(10), lambda n: n % 3 == 0))
[[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
[[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
buf.append(item)
if pred(item) and buf:
yield buf
if maxsplit == 1:
buf = list(it)
if buf:
yield buf
return
buf = []
maxsplit -= 1
if buf:
yield buf
def split_when(iterable, pred, maxsplit=-1):
"""Split *iterable* into pieces based on the output of *pred*.
*pred* should be a function that takes successive pairs of items and
returns ``True`` if the iterable should be split in between them.
For example, to find runs of increasing numbers, split the iterable when
element ``i`` is larger than element ``i + 1``:
>>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
[[1, 2, 3, 3], [2, 5], [2, 4], [2]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
... lambda x, y: x > y, maxsplit=2))
[[1, 2, 3, 3], [2, 5], [2, 4, 2]]
"""
if maxsplit == 0:
yield list(iterable)
return
it = iter(iterable)
try:
cur_item = next(it)
except StopIteration:
return
buf = [cur_item]
for next_item in it:
if pred(cur_item, next_item):
yield buf
if maxsplit == 1:
yield [next_item] + list(it)
return
buf = []
maxsplit -= 1
buf.append(next_item)
cur_item = next_item
yield buf
def split_into(iterable, sizes):
"""Yield a list of sequential items from *iterable* of length 'n' for each
integer 'n' in *sizes*.
>>> list(split_into([1,2,3,4,5,6], [1,2,3]))
[[1], [2, 3], [4, 5, 6]]
If the sum of *sizes* is smaller than the length of *iterable*, then the
remaining items of *iterable* will not be returned.
>>> list(split_into([1,2,3,4,5,6], [2,3]))
[[1, 2], [3, 4, 5]]
If the sum of *sizes* is larger than the length of *iterable*, fewer items
will be returned in the iteration that overruns *iterable* and further
lists will be empty:
>>> list(split_into([1,2,3,4], [1,2,3,4]))
[[1], [2, 3], [4], []]
When a ``None`` object is encountered in *sizes*, the returned list will
contain items up to the end of *iterable*, the same way that
itertools.islice does:
>>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
[[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
:func:`split_into` can be useful for grouping a series of items where the
sizes of the groups are not uniform. An example would be a row from a
table in which multiple columns represent elements of the same feature
(e.g. a point represented by x, y, z) but the format is not the same for
all columns.
"""
# convert the iterable argument into an iterator so its contents can
# be consumed by islice in case it is a generator
it = iter(iterable)
for size in sizes:
if size is None:
yield list(it)
return
else:
yield list(islice(it, size))
def padded(iterable, fillvalue=None, n=None, next_multiple=False):
"""Yield the elements from *iterable*, followed by *fillvalue*, such that
at least *n* items are emitted.
>>> list(padded([1, 2, 3], '?', 5))
[1, 2, 3, '?', '?']
If *next_multiple* is ``True``, *fillvalue* will be emitted until the
number of items emitted is a multiple of *n*::
>>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
[1, 2, 3, 4, None, None]
If *n* is ``None``, *fillvalue* will be emitted indefinitely.
"""
it = iter(iterable)
if n is None:
yield from chain(it, repeat(fillvalue))
elif n < 1:
raise ValueError('n must be at least 1')
else:
item_count = 0
for item in it:
yield item
item_count += 1
remaining = (n - item_count) % n if next_multiple else n - item_count
for _ in range(remaining):
yield fillvalue
def repeat_each(iterable, n=2):
"""Repeat each element in *iterable* *n* times.
>>> list(repeat_each('ABC', 3))
['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
"""
return chain.from_iterable(map(repeat, iterable, repeat(n)))
def repeat_last(iterable, default=None):
"""After the *iterable* is exhausted, keep yielding its last element.
>>> list(islice(repeat_last(range(3)), 5))
[0, 1, 2, 2, 2]
If the iterable is empty, yield *default* forever::
>>> list(islice(repeat_last(range(0), 42), 5))
[42, 42, 42, 42, 42]
"""
item = _marker
for item in iterable:
yield item
final = default if item is _marker else item
yield from repeat(final)
def distribute(n, iterable):
"""Distribute the items from *iterable* among *n* smaller iterables.
>>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 3, 5]
>>> list(group_2)
[2, 4, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 4, 7], [2, 5], [3, 6]]
If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = distribute(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function uses :func:`itertools.tee` and may require significant
storage. If you need the order of items in the smaller iterables to match
the original iterable, see :func:`divide`.
"""
if n < 1:
raise ValueError('n must be at least 1')
children = tee(iterable, n)
return [islice(it, index, None, n) for index, it in enumerate(children)]
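# A minimal sketch for exposition (the helper name is hypothetical, not part
# of the library API). It shows that `distribute` deals items out round-robin
# via `islice` strides, in contrast to `divide` below, which keeps contiguous
# runs.
def _demo_distribute_order():
    evens, odds = distribute(2, range(10))
    # Child i receives every 2nd item starting at offset i
    assert list(evens) == [0, 2, 4, 6, 8]
    assert list(odds) == [1, 3, 5, 7, 9]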
def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
"""Yield tuples whose elements are offset from *iterable*.
The amount by which the `i`-th item in each tuple is offset is given by
the `i`-th item in *offsets*.
>>> list(stagger([0, 1, 2, 3]))
[(None, 0, 1), (0, 1, 2), (1, 2, 3)]
>>> list(stagger(range(8), offsets=(0, 2, 4)))
[(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
By default, the sequence will end when the final element of a tuple is the
last item in the iterable. To continue until the first element of a tuple
is the last item in the iterable, set *longest* to ``True``::
>>> list(stagger([0, 1, 2, 3], longest=True))
[(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
children = tee(iterable, len(offsets))
return zip_offset(
*children, offsets=offsets, longest=longest, fillvalue=fillvalue
)
def zip_equal(*iterables):
"""``zip`` the input *iterables* together, but raise
``UnequalIterablesError`` if they aren't all the same length.
>>> it_1 = range(3)
>>> it_2 = iter('abc')
>>> list(zip_equal(it_1, it_2))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> it_1 = range(3)
>>> it_2 = iter('abcd')
>>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
more_itertools.more.UnequalIterablesError: Iterables have different
lengths
"""
if hexversion >= 0x30A00A6:
warnings.warn(
(
'zip_equal will be removed in a future version of '
'more-itertools. Use the builtin zip function with '
'strict=True instead.'
),
DeprecationWarning,
)
return _zip_equal(*iterables)
def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
"""``zip`` the input *iterables* together, but offset the `i`-th iterable
by the `i`-th item in *offsets*.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
This can be used as a lightweight alternative to SciPy or pandas to analyze
data sets in which some series have a lead or lag relationship.
By default, the sequence will end when the shortest iterable is exhausted.
To continue until the longest iterable is exhausted, set *longest* to
``True``.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
if len(iterables) != len(offsets):
raise ValueError("Number of iterables and offsets didn't match")
staggered = []
for it, n in zip(iterables, offsets):
if n < 0:
staggered.append(chain(repeat(fillvalue, -n), it))
elif n > 0:
staggered.append(islice(it, n, None))
else:
staggered.append(it)
if longest:
return zip_longest(*staggered, fillvalue=fillvalue)
return zip(*staggered)
def sort_together(iterables, key_list=(0,), key=None, reverse=False):
"""Return the input iterables sorted together, with *key_list* as the
priority for sorting. All iterables are trimmed to the length of the
shortest one.
This can be used like the sorting function in a spreadsheet. If each
iterable represents a column of data, the key list determines which
columns are used for sorting.
By default, all iterables are sorted using the ``0``-th iterable::
>>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
>>> sort_together(iterables)
[(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
Set a different key list to sort according to another iterable.
Specifying multiple keys dictates how ties are broken::
>>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
>>> sort_together(iterables, key_list=(1, 2))
[(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
To sort by a function of the elements of the iterable, pass a *key*
function. Its arguments are the elements of the iterables corresponding to
the key list::
>>> names = ('a', 'b', 'c')
>>> lengths = (1, 2, 3)
>>> widths = (5, 2, 1)
>>> def area(length, width):
... return length * width
>>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
[('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]
Set *reverse* to ``True`` to sort in descending order.
>>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
[(3, 2, 1), ('a', 'b', 'c')]
"""
if key is None:
# if there is no key function, the key argument to sorted is an
# itemgetter
key_argument = itemgetter(*key_list)
else:
# if there is a key function, call it with the items at the offsets
# specified by the key function as arguments
key_list = list(key_list)
if len(key_list) == 1:
# if key_list contains a single item, pass the item at that offset
# as the only argument to the key function
key_offset = key_list[0]
key_argument = lambda zipped_items: key(zipped_items[key_offset])
else:
# if key_list contains multiple items, use itemgetter to return a
# tuple of items, which we pass as *args to the key function
get_key_items = itemgetter(*key_list)
key_argument = lambda zipped_items: key(
*get_key_items(zipped_items)
)
return list(
zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
)
def unzip(iterable):
"""The inverse of :func:`zip`, this function disaggregates the elements
of the zipped *iterable*.
The ``i``-th iterable contains the ``i``-th element from each element
of the zipped iterable. The first element is used to determine the
length of the remaining elements.
>>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> letters, numbers = unzip(iterable)
>>> list(letters)
['a', 'b', 'c', 'd']
>>> list(numbers)
[1, 2, 3, 4]
This is similar to using ``zip(*iterable)``, but it avoids reading
*iterable* into memory. Note, however, that this function uses
:func:`itertools.tee` and thus may require significant storage.
"""
head, iterable = spy(iter(iterable))
if not head:
# empty iterable, e.g. zip([], [], [])
return ()
# spy returns a one-length iterable as head
head = head[0]
iterables = tee(iterable, len(head))
def itemgetter(i):
def getter(obj):
try:
return obj[i]
except IndexError:
# basically if we have an iterable like
# iter([(1, 2, 3), (4, 5), (6,)])
# the second unzipped iterable would fail at the third tuple
# since it would try to access tup[1]
# same with the third unzipped iterable and the second tuple
# to support these "improperly zipped" iterables,
# we create a custom itemgetter
# which just stops the unzipped iterables
# at first length mismatch
raise StopIteration
return getter
return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
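# A minimal sketch for exposition (hypothetical helper, not part of the
# library API). It shows why the custom itemgetter above converts IndexError
# to StopIteration: each unzipped iterable simply ends at the first tuple
# that is too short, instead of raising.
def _demo_unzip_ragged():
    firsts, seconds = unzip([(1, 2), (3, 4), (5,)])
    assert list(firsts) == [1, 3, 5]
    assert list(seconds) == [2, 4]  # (5,) has no index 1, so it stops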
def divide(n, iterable):
"""Divide the elements from *iterable* into *n* parts, maintaining
order.
>>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 2, 3]
>>> list(group_2)
[4, 5, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 2, 3], [4, 5], [6, 7]]
If the length of the iterable is smaller than n, then the last returned
iterables will be empty:
>>> children = divide(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function will exhaust the iterable before returning and may require
significant storage. If order is not important, see :func:`distribute`,
which does not first pull the iterable into memory.
"""
if n < 1:
raise ValueError('n must be at least 1')
try:
iterable[:0]
except TypeError:
seq = tuple(iterable)
else:
seq = iterable
q, r = divmod(len(seq), n)
ret = []
stop = 0
for i in range(1, n + 1):
start = stop
stop += q + 1 if i <= r else q
ret.append(iter(seq[start:stop]))
return ret
def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,))
def adjacent(predicate, iterable, distance=1):
"""Return an iterable over `(bool, item)` tuples where the `item` is
drawn from *iterable* and the `bool` indicates whether
that item satisfies the *predicate* or is adjacent to an item that does.
For example, to find whether items are adjacent to a ``3``::
>>> list(adjacent(lambda x: x == 3, range(6)))
[(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
Set *distance* to change what counts as adjacent. For example, to find
whether items are two places away from a ``3``:
>>> list(adjacent(lambda x: x == 3, range(6), distance=2))
[(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
This is useful for contextualizing the results of a search function.
For example, a code comparison tool might want to identify lines that
have changed, but also surrounding lines to give the viewer of the diff
context.
The predicate function will only be called once for each item in the
iterable.
See also :func:`groupby_transform`, which can be used with this function
to group ranges of items with the same `bool` value.
"""
# Allow distance=0 mainly for testing that it reproduces results with map()
if distance < 0:
raise ValueError('distance must be at least 0')
i1, i2 = tee(iterable)
padding = [False] * distance
selected = chain(padding, map(predicate, i1), padding)
adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
return zip(adjacent_to_selected, i2)
def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
"""An extension of :func:`itertools.groupby` that can apply transformations
to the grouped data.
* *keyfunc* is a function computing a key value for each item in *iterable*
* *valuefunc* is a function that transforms the individual items from
*iterable* after grouping
* *reducefunc* is a function that transforms each group of items
>>> iterable = 'aAAbBBcCC'
>>> keyfunc = lambda k: k.upper()
>>> valuefunc = lambda v: v.lower()
>>> reducefunc = lambda g: ''.join(g)
>>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
[('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
Each optional argument defaults to an identity function if not specified.
:func:`groupby_transform` is useful when grouping elements of an iterable
using a separate iterable as the key. To do this, :func:`zip` the iterables
and pass a *keyfunc* that extracts the first element and a *valuefunc*
that extracts the second element::
>>> from operator import itemgetter
>>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
>>> values = 'abcdefghi'
>>> iterable = zip(keys, values)
>>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
>>> [(k, ''.join(g)) for k, g in grouper]
[(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
Note that the order of items in the iterable is significant.
Only adjacent items are grouped together, so if you don't want any
duplicate groups, you should sort the iterable by the key function.
"""
ret = groupby(iterable, keyfunc)
if valuefunc:
ret = ((k, map(valuefunc, g)) for k, g in ret)
if reducefunc:
ret = ((k, reducefunc(g)) for k, g in ret)
return ret
class numeric_range(abc.Sequence, abc.Hashable):
"""An extension of the built-in ``range()`` function whose arguments can
be any orderable numeric type.
With only *stop* specified, *start* defaults to ``0`` and *step*
defaults to ``1``. The output items will match the type of *stop*:
>>> list(numeric_range(3.5))
[0.0, 1.0, 2.0, 3.0]
With only *start* and *stop* specified, *step* defaults to ``1``. The
output items will match the type of *start*:
>>> from decimal import Decimal
>>> start = Decimal('2.1')
>>> stop = Decimal('5.1')
>>> list(numeric_range(start, stop))
[Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
With *start*, *stop*, and *step* specified the output items will match
the type of ``start + step``:
>>> from fractions import Fraction
>>> start = Fraction(1, 2) # Start at 1/2
>>> stop = Fraction(5, 2) # End at 5/2
>>> step = Fraction(1, 2) # Count by 1/2
>>> list(numeric_range(start, stop, step))
[Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
>>> list(numeric_range(3, -1, -1.0))
[3.0, 2.0, 1.0, 0.0]
Be aware of the limitations of floating point numbers; the representation
of the yielded numbers may be surprising.
``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
is a ``datetime.timedelta`` object:
>>> import datetime
>>> start = datetime.datetime(2019, 1, 1)
>>> stop = datetime.datetime(2019, 1, 3)
>>> step = datetime.timedelta(days=1)
>>> items = iter(numeric_range(start, stop, step))
>>> next(items)
datetime.datetime(2019, 1, 1, 0, 0)
>>> next(items)
datetime.datetime(2019, 1, 2, 0, 0)
"""
_EMPTY_HASH = hash(range(0, 0))
def __init__(self, *args):
argc = len(args)
if argc == 1:
(self._stop,) = args
self._start = type(self._stop)(0)
self._step = type(self._stop - self._start)(1)
elif argc == 2:
self._start, self._stop = args
self._step = type(self._stop - self._start)(1)
elif argc == 3:
self._start, self._stop, self._step = args
elif argc == 0:
raise TypeError(
'numeric_range expected at least '
'1 argument, got {}'.format(argc)
)
else:
raise TypeError(
'numeric_range expected at most '
'3 arguments, got {}'.format(argc)
)
self._zero = type(self._step)(0)
if self._step == self._zero:
raise ValueError('numeric_range() arg 3 must not be zero')
self._growing = self._step > self._zero
self._init_len()
def __bool__(self):
if self._growing:
return self._start < self._stop
else:
return self._start > self._stop
def __contains__(self, elem):
if self._growing:
if self._start <= elem < self._stop:
return (elem - self._start) % self._step == self._zero
else:
if self._start >= elem > self._stop:
return (self._start - elem) % (-self._step) == self._zero
return False
def __eq__(self, other):
if isinstance(other, numeric_range):
empty_self = not bool(self)
empty_other = not bool(other)
if empty_self or empty_other:
return empty_self and empty_other # True if both empty
else:
return (
self._start == other._start
and self._step == other._step
and self._get_by_index(-1) == other._get_by_index(-1)
)
else:
return False
def __getitem__(self, key):
if isinstance(key, int):
return self._get_by_index(key)
elif isinstance(key, slice):
step = self._step if key.step is None else key.step * self._step
if key.start is None or key.start <= -self._len:
start = self._start
elif key.start >= self._len:
start = self._stop
else: # -self._len < key.start < self._len
start = self._get_by_index(key.start)
if key.stop is None or key.stop >= self._len:
stop = self._stop
elif key.stop <= -self._len:
stop = self._start
else: # -self._len < key.stop < self._len
stop = self._get_by_index(key.stop)
return numeric_range(start, stop, step)
else:
raise TypeError(
'numeric range indices must be '
'integers or slices, not {}'.format(type(key).__name__)
)
def __hash__(self):
if self:
return hash((self._start, self._get_by_index(-1), self._step))
else:
return self._EMPTY_HASH
def __iter__(self):
values = (self._start + (n * self._step) for n in count())
if self._growing:
return takewhile(partial(gt, self._stop), values)
else:
return takewhile(partial(lt, self._stop), values)
def __len__(self):
return self._len
def _init_len(self):
if self._growing:
start = self._start
stop = self._stop
step = self._step
else:
start = self._stop
stop = self._start
step = -self._step
distance = stop - start
if distance <= self._zero:
self._len = 0
else: # distance > 0 and step > 0: regular euclidean division
q, r = divmod(distance, step)
self._len = int(q) + int(r != self._zero)
def __reduce__(self):
return numeric_range, (self._start, self._stop, self._step)
def __repr__(self):
if self._step == 1:
return "numeric_range({}, {})".format(
repr(self._start), repr(self._stop)
)
else:
return "numeric_range({}, {}, {})".format(
repr(self._start), repr(self._stop), repr(self._step)
)
def __reversed__(self):
return iter(
numeric_range(
self._get_by_index(-1), self._start - self._step, -self._step
)
)
def count(self, value):
return int(value in self)
def index(self, value):
if self._growing:
if self._start <= value < self._stop:
q, r = divmod(value - self._start, self._step)
if r == self._zero:
return int(q)
else:
if self._start >= value > self._stop:
q, r = divmod(self._start - value, -self._step)
if r == self._zero:
return int(q)
raise ValueError("{} is not in numeric range".format(value))
def _get_by_index(self, i):
if i < 0:
i += self._len
if i < 0 or i >= self._len:
raise IndexError("numeric range object index out of range")
return self._start + i * self._step
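# A minimal sketch for exposition (hypothetical helper, not part of the
# library API). It checks the length formula in _init_len: the length is
# ceil(distance / step), computed as q plus one when divmod leaves a nonzero
# remainder.
def _demo_numeric_range_len():
    r = numeric_range(0.0, 1.0, 0.3)
    # divmod(1.0, 0.3) gives q == 3.0 with a nonzero remainder, so len is 4
    assert len(r) == 4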
def count_cycle(iterable, n=None):
"""Cycle through the items from *iterable* up to *n* times, yielding
the number of completed cycles along with each item. If *n* is omitted the
process repeats indefinitely.
>>> list(count_cycle('AB', 3))
[(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
"""
iterable = tuple(iterable)
if not iterable:
return iter(())
counter = count() if n is None else range(n)
return ((i, item) for i in counter for item in iterable)
def mark_ends(iterable):
"""Yield 3-tuples of the form ``(is_first, is_last, item)``.
>>> list(mark_ends('ABC'))
[(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]
Use this when looping over an iterable to take special action on its first
and/or last items:
>>> iterable = ['Header', 100, 200, 'Footer']
>>> total = 0
>>> for is_first, is_last, item in mark_ends(iterable):
... if is_first:
... continue # Skip the header
... if is_last:
... continue # Skip the footer
... total += item
>>> print(total)
300
"""
it = iter(iterable)
try:
b = next(it)
except StopIteration:
return
try:
for i in count():
a = b
b = next(it)
yield i == 0, False, a
except StopIteration:
yield i == 0, True, a
def locate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(locate([0, 1, 1, 0, 1, 0, 0]))
[1, 2, 4]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item.
>>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
[1, 3]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(locate(iterable, pred=pred, window_size=3))
[1, 5, 9]
Use with :func:`seekable` to find indexes and then retrieve the associated
items:
>>> from itertools import count
>>> from more_itertools import seekable
>>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
>>> it = seekable(source)
>>> pred = lambda x: x > 100
>>> indexes = locate(it, pred=pred)
>>> i = next(indexes)
>>> it.seek(i)
>>> next(it)
106
"""
if window_size is None:
return compress(count(), map(pred, iterable))
if window_size < 1:
raise ValueError('window size must be at least 1')
it = windowed(iterable, window_size, fillvalue=_marker)
return compress(count(), starmap(pred, it))
def longest_common_prefix(iterables):
"""Yield elements of the longest common prefix amongst given *iterables*.
>>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf']))
'ab'
"""
return (c[0] for c in takewhile(all_equal, zip(*iterables)))
def lstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the beginning
for which *pred* returns ``True``.
For example, to remove a set of items from the start of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(lstrip(iterable, pred))
[1, 2, None, 3, False, None]
    This function is analogous to :func:`str.lstrip`, and is essentially
    a wrapper for :func:`itertools.dropwhile`.
"""
return dropwhile(pred, iterable)
def rstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the end
for which *pred* returns ``True``.
For example, to remove a set of items from the end of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(rstrip(iterable, pred))
[None, False, None, 1, 2, None, 3]
This function is analogous to :func:`str.rstrip`.
"""
cache = []
cache_append = cache.append
cache_clear = cache.clear
for x in iterable:
if pred(x):
cache_append(x)
else:
yield from cache
cache_clear()
yield x
def strip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the
beginning and end for which *pred* returns ``True``.
For example, to remove a set of items from both ends of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(strip(iterable, pred))
[1, 2, None, 3]
This function is analogous to :func:`str.strip`.
"""
return rstrip(lstrip(iterable, pred), pred)
class islice_extended:
"""An extension of :func:`itertools.islice` that supports negative values
for *stop*, *start*, and *step*.
>>> iterable = iter('abcdefgh')
>>> list(islice_extended(iterable, -4, -1))
['e', 'f', 'g']
Slices with negative values require some caching of *iterable*, but this
function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator:
>>> from itertools import count
>>> list(islice_extended(count(), 110, 99, -2))
[110, 108, 106, 104, 102, 100]
You can also use slice notation directly:
>>> iterable = map(str, count())
>>> it = islice_extended(iterable)[10:20:2]
>>> list(it)
['10', '12', '14', '16', '18']
"""
def __init__(self, iterable, *args):
it = iter(iterable)
if args:
self._iterable = _islice_helper(it, slice(*args))
else:
self._iterable = it
def __iter__(self):
return self
def __next__(self):
return next(self._iterable)
def __getitem__(self, key):
if isinstance(key, slice):
return islice_extended(_islice_helper(self._iterable, key))
raise TypeError('islice_extended.__getitem__ argument must be a slice')
def _islice_helper(it, s):
start = s.start
stop = s.stop
if s.step == 0:
raise ValueError('step argument must be a non-zero integer or None.')
step = s.step or 1
if step > 0:
start = 0 if (start is None) else start
if start < 0:
# Consume all but the last -start items
cache = deque(enumerate(it, 1), maxlen=-start)
len_iter = cache[-1][0] if cache else 0
# Adjust start to be positive
i = max(len_iter + start, 0)
# Adjust stop to be positive
if stop is None:
j = len_iter
elif stop >= 0:
j = min(stop, len_iter)
else:
j = max(len_iter + stop, 0)
# Slice the cache
n = j - i
if n <= 0:
return
for index, item in islice(cache, 0, n, step):
yield item
elif (stop is not None) and (stop < 0):
# Advance to the start position
next(islice(it, start, start), None)
# When stop is negative, we have to carry -stop items while
# iterating
cache = deque(islice(it, -stop), maxlen=-stop)
for index, item in enumerate(it):
cached_item = cache.popleft()
if index % step == 0:
yield cached_item
cache.append(item)
else:
# When both start and stop are positive we have the normal case
yield from islice(it, start, stop, step)
else:
start = -1 if (start is None) else start
if (stop is not None) and (stop < 0):
# Consume all but the last items
n = -stop - 1
cache = deque(enumerate(it, 1), maxlen=n)
len_iter = cache[-1][0] if cache else 0
# If start and stop are both negative they are comparable and
# we can just slice. Otherwise we can adjust start to be negative
# and then slice.
if start < 0:
i, j = start, stop
else:
i, j = min(start - len_iter, -1), None
for index, item in list(cache)[i:j:step]:
yield item
else:
# Advance to the stop position
if stop is not None:
m = stop + 1
next(islice(it, m, m), None)
# stop is positive, so if start is negative they are not comparable
# and we need the rest of the items.
if start < 0:
i = start
n = None
# stop is None and start is positive, so we just need items up to
# the start index.
elif stop is None:
i = None
n = start + 1
# Both stop and start are positive, so they are comparable.
else:
i = None
n = start - stop
if n <= 0:
return
cache = list(islice(it, n))
yield from cache[i::step]
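# A minimal sketch for exposition (hypothetical helper, not part of the
# library API). It exercises both negative-value branches of _islice_helper
# against plain iterators, where built-in slicing is unavailable.
def _demo_islice_extended_negative():
    # Negative start with a positive step: cache only the last 3 items
    assert list(islice_extended(iter(range(10)), -3, None)) == [7, 8, 9]
    # Negative step: cache everything, then emit in reverse
    assert list(islice_extended(iter(range(10)), None, None, -1)) == [
        9, 8, 7, 6, 5, 4, 3, 2, 1, 0
    ]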
def always_reversible(iterable):
"""An extension of :func:`reversed` that supports all iterables, not
just those which implement the ``Reversible`` or ``Sequence`` protocols.
>>> print(*always_reversible(x for x in range(3)))
2 1 0
If the iterable is already reversible, this function returns the
result of :func:`reversed()`. If the iterable is not reversible,
this function will cache the remaining items in the iterable and
yield them in reverse order, which may require significant storage.
"""
try:
return reversed(iterable)
except TypeError:
return reversed(list(iterable))
def consecutive_groups(iterable, ordering=lambda x: x):
"""Yield groups of consecutive items using :func:`itertools.groupby`.
The *ordering* function determines whether two items are adjacent by
returning their position.
By default, the ordering function is the identity function. This is
suitable for finding runs of numbers:
>>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
>>> for group in consecutive_groups(iterable):
... print(list(group))
[1]
[10, 11, 12]
[20]
[30, 31, 32, 33]
[40]
For finding runs of adjacent letters, try using the :meth:`index` method
of a string of letters:
>>> from string import ascii_lowercase
>>> iterable = 'abcdfgilmnop'
>>> ordering = ascii_lowercase.index
>>> for group in consecutive_groups(iterable, ordering):
... print(list(group))
['a', 'b', 'c', 'd']
['f', 'g']
['i']
['l', 'm', 'n', 'o', 'p']
    Each group of consecutive items is an iterator that shares its source with
    *iterable*. When an output group is advanced, the previous group is
no longer available unless its elements are copied (e.g., into a ``list``).
>>> iterable = [1, 2, 11, 12, 21, 22]
>>> saved_groups = []
>>> for group in consecutive_groups(iterable):
... saved_groups.append(list(group)) # Copy group elements
>>> saved_groups
[[1, 2], [11, 12], [21, 22]]
"""
for k, g in groupby(
enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
):
yield map(itemgetter(1), g)
def difference(iterable, func=sub, *, initial=None):
"""This function is the inverse of :func:`itertools.accumulate`. By default
it will compute the first difference of *iterable* using
:func:`operator.sub`:
>>> from itertools import accumulate
>>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
*func* defaults to :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120]
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
If the *initial* keyword is set, the first element will be skipped when
computing successive differences.
>>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
>>> list(difference(it, initial=10))
[1, 2, 3]
"""
a, b = tee(iterable)
try:
first = [next(b)]
except StopIteration:
return iter([])
if initial is not None:
first = []
return chain(first, map(func, b, a))
class SequenceView(Sequence):
"""Return a read-only view of the sequence object *target*.
:class:`SequenceView` objects are analogous to Python's built-in
"dictionary view" types. They provide a dynamic view of a sequence's items,
meaning that when the sequence updates, so does the view.
>>> seq = ['0', '1', '2']
>>> view = SequenceView(seq)
>>> view
SequenceView(['0', '1', '2'])
>>> seq.append('3')
>>> view
SequenceView(['0', '1', '2', '3'])
Sequence views support indexing, slicing, and length queries. They act
like the underlying sequence, except they don't allow assignment:
>>> view[1]
'1'
>>> view[1:-1]
['1', '2']
>>> len(view)
4
Sequence views are useful as an alternative to copying, as they don't
require (much) extra storage.
"""
def __init__(self, target):
if not isinstance(target, Sequence):
raise TypeError
self._target = target
def __getitem__(self, index):
return self._target[index]
def __len__(self):
return len(self._target)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, repr(self._target))
class seekable:
"""Wrap an iterator to allow for seeking backward and forward. This
progressively caches the items in the source iterable so they can be
re-visited.
Call :meth:`seek` with an index to seek to that position in the source
iterable.
To "reset" an iterator, seek to ``0``:
>>> from itertools import count
>>> it = seekable((str(n) for n in count()))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> it.seek(0)
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> next(it)
'3'
You can also seek forward:
>>> it = seekable((str(n) for n in range(20)))
>>> it.seek(10)
>>> next(it)
'10'
>>> it.seek(20) # Seeking past the end of the source isn't a problem
>>> list(it)
[]
>>> it.seek(0) # Resetting works even after hitting the end
>>> next(it), next(it), next(it)
('0', '1', '2')
Call :meth:`peek` to look ahead one item without advancing the iterator:
>>> it = seekable('1234')
>>> it.peek()
'1'
>>> list(it)
['1', '2', '3', '4']
>>> it.peek(default='empty')
'empty'
Before the iterator is at its end, calling :func:`bool` on it will return
``True``. After it will return ``False``:
>>> it = seekable('5678')
>>> bool(it)
True
>>> list(it)
['5', '6', '7', '8']
>>> bool(it)
False
You may view the contents of the cache with the :meth:`elements` method.
That returns a :class:`SequenceView`, a view that updates automatically:
>>> it = seekable((str(n) for n in range(10)))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> elements = it.elements()
>>> elements
SequenceView(['0', '1', '2'])
>>> next(it)
'3'
>>> elements
SequenceView(['0', '1', '2', '3'])
By default, the cache grows as the source iterable progresses, so beware of
wrapping very large or infinite iterables. Supply *maxlen* to limit the
size of the cache (this of course limits how far back you can seek).
>>> from itertools import count
>>> it = seekable((str(n) for n in count()), maxlen=2)
>>> next(it), next(it), next(it), next(it)
('0', '1', '2', '3')
>>> list(it.elements())
['2', '3']
>>> it.seek(0)
>>> next(it), next(it), next(it), next(it)
('2', '3', '4', '5')
>>> next(it)
'6'
"""
def __init__(self, iterable, maxlen=None):
self._source = iter(iterable)
if maxlen is None:
self._cache = []
else:
self._cache = deque([], maxlen)
self._index = None
def __iter__(self):
return self
def __next__(self):
if self._index is not None:
try:
item = self._cache[self._index]
except IndexError:
self._index = None
else:
self._index += 1
return item
item = next(self._source)
self._cache.append(item)
return item
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def peek(self, default=_marker):
try:
peeked = next(self)
except StopIteration:
if default is _marker:
raise
return default
if self._index is None:
self._index = len(self._cache)
self._index -= 1
return peeked
def elements(self):
return SequenceView(self._cache)
def seek(self, index):
self._index = index
remainder = index - len(self._cache)
if remainder > 0:
consume(self, remainder)
class run_length:
"""
:func:`run_length.encode` compresses an iterable with run-length encoding.
It yields groups of repeated items with the count of how many times they
were repeated:
>>> uncompressed = 'abbcccdddd'
>>> list(run_length.encode(uncompressed))
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
:func:`run_length.decode` decompresses an iterable that was previously
compressed with run-length encoding. It yields the items of the
decompressed iterable:
>>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> list(run_length.decode(compressed))
['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
"""
@staticmethod
def encode(iterable):
return ((k, ilen(g)) for k, g in groupby(iterable))
@staticmethod
def decode(iterable):
return chain.from_iterable(repeat(k, n) for k, n in iterable)
def exactly_n(iterable, n, predicate=bool):
"""Return ``True`` if exactly ``n`` items in the iterable are ``True``
according to the *predicate* function.
>>> exactly_n([True, True, False], 2)
True
>>> exactly_n([True, True, False], 1)
False
>>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
True
The iterable will be advanced until ``n + 1`` truthy items are encountered,
so avoid calling it on infinite iterables.
"""
return len(take(n + 1, filter(predicate, iterable))) == n
def circular_shifts(iterable):
"""Return a list of circular shifts of *iterable*.
>>> circular_shifts(range(4))
[(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
"""
lst = list(iterable)
return take(len(lst), windowed(cycle(lst), len(lst)))
def make_decorator(wrapping_func, result_index=0):
"""Return a decorator version of *wrapping_func*, which is a function that
modifies an iterable. *result_index* is the position in that function's
signature where the iterable goes.
This lets you use itertools on the "production end," i.e. at function
definition. This can augment what the function returns without changing the
function's code.
For example, to produce a decorator version of :func:`chunked`:
>>> from more_itertools import chunked
>>> chunker = make_decorator(chunked, result_index=0)
>>> @chunker(3)
... def iter_range(n):
... return iter(range(n))
...
>>> list(iter_range(9))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
To only allow truthy items to be returned:
>>> truth_serum = make_decorator(filter, result_index=1)
>>> @truth_serum(bool)
... def boolean_test():
... return [0, 1, '', ' ', False, True]
...
>>> list(boolean_test())
[1, ' ', True]
The :func:`peekable` and :func:`seekable` wrappers make for practical
decorators:
>>> from more_itertools import peekable
>>> peekable_function = make_decorator(peekable)
>>> @peekable_function()
... def str_range(*args):
... return (str(x) for x in range(*args))
...
>>> it = str_range(1, 20, 2)
>>> next(it), next(it), next(it)
('1', '3', '5')
>>> it.peek()
'7'
>>> next(it)
'7'
"""
# See https://sites.google.com/site/bbayles/index/decorator_factory for
# notes on how this works.
def decorator(*wrapping_args, **wrapping_kwargs):
def outer_wrapper(f):
def inner_wrapper(*args, **kwargs):
result = f(*args, **kwargs)
wrapping_args_ = list(wrapping_args)
wrapping_args_.insert(result_index, result)
return wrapping_func(*wrapping_args_, **wrapping_kwargs)
return inner_wrapper
return outer_wrapper
return decorator
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
"""Return a dictionary that maps the items in *iterable* to categories
defined by *keyfunc*, transforms them with *valuefunc*, and
then summarizes them by category with *reducefunc*.
*valuefunc* defaults to the identity function if it is unspecified.
If *reducefunc* is unspecified, no summarization takes place:
>>> keyfunc = lambda x: x.upper()
>>> result = map_reduce('abbccc', keyfunc)
>>> sorted(result.items())
[('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
Specifying *valuefunc* transforms the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> result = map_reduce('abbccc', keyfunc, valuefunc)
>>> sorted(result.items())
[('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
Specifying *reducefunc* summarizes the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> reducefunc = sum
>>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
>>> sorted(result.items())
[('A', 1), ('B', 2), ('C', 3)]
You may want to filter the input iterable before applying the map/reduce
procedure:
>>> all_items = range(30)
>>> items = [x for x in all_items if 10 <= x <= 20] # Filter
>>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
>>> categories = map_reduce(items, keyfunc=keyfunc)
>>> sorted(categories.items())
[(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
>>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
>>> sorted(summaries.items())
[(0, 90), (1, 75)]
Note that all items in the iterable are gathered into a list before the
summarization step, which may require significant storage.
The returned object is a :obj:`collections.defaultdict` with the
``default_factory`` set to ``None``, such that it behaves like a normal
dictionary.
"""
valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
ret = defaultdict(list)
for item in iterable:
key = keyfunc(item)
value = valuefunc(item)
ret[key].append(value)
if reducefunc is not None:
for key, value_list in ret.items():
ret[key] = reducefunc(value_list)
ret.default_factory = None
return ret
def rlocate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``, starting from the right and moving left.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
[4, 2, 1]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item:
>>> iterable = iter('abcb')
>>> pred = lambda x: x == 'b'
>>> list(rlocate(iterable, pred))
[3, 1]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(rlocate(iterable, pred=pred, window_size=3))
[9, 5, 1]
Beware, this function won't return anything for infinite iterables.
If *iterable* is reversible, ``rlocate`` will reverse it and search from
the right. Otherwise, it will search from the left and return the results
in reverse order.
    See :func:`locate` for other example applications.
"""
if window_size is None:
try:
len_iter = len(iterable)
return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
except TypeError:
pass
return reversed(list(locate(iterable, pred, window_size)))
def replace(iterable, pred, substitutes, count=None, window_size=1):
"""Yield the items from *iterable*, replacing the items for which *pred*
returns ``True`` with the items from the iterable *substitutes*.
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
>>> pred = lambda x: x == 0
>>> substitutes = (2, 3)
>>> list(replace(iterable, pred, substitutes))
[1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
If *count* is given, the number of replacements will be limited:
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
>>> pred = lambda x: x == 0
>>> substitutes = [None]
>>> list(replace(iterable, pred, substitutes, count=2))
[1, 1, None, 1, 1, None, 1, 1, 0]
Use *window_size* to control the number of items passed as arguments to
*pred*. This allows for locating and replacing subsequences.
>>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
>>> window_size = 3
>>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
>>> substitutes = [3, 4] # Splice in these items
>>> list(replace(iterable, pred, substitutes, window_size=window_size))
[3, 4, 5, 3, 4, 5]
"""
if window_size < 1:
raise ValueError('window_size must be at least 1')
# Save the substitutes iterable, since it's used more than once
substitutes = tuple(substitutes)
# Add padding such that the number of windows matches the length of the
# iterable
it = chain(iterable, [_marker] * (window_size - 1))
windows = windowed(it, window_size)
n = 0
for w in windows:
# If the current window matches our predicate (and we haven't hit
# our maximum number of replacements), splice in the substitutes
# and then consume the following windows that overlap with this one.
# For example, if the iterable is (0, 1, 2, 3, 4...)
# and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
# If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
if pred(*w):
if (count is None) or (n < count):
n += 1
yield from substitutes
consume(windows, window_size - 1)
continue
# If there was no match (or we've reached the replacement limit),
# yield the first item from the window.
if w and (w[0] is not _marker):
yield w[0]
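# A minimal sketch for exposition (hypothetical helper, not part of the
# library API). It shows the consequence of consuming the overlapping windows
# after a match: matches that overlap a replaced region are not replaced
# again.
def _demo_replace_overlap():
    pred = lambda a, b: (a, b) == (0, 0)
    # The padded windows are (0, 0), (0, 0), (0, <marker>); only the first
    # match is spliced, and the overlapping second window is consumed.
    assert list(replace([0, 0, 0], pred, ['x'], window_size=2)) == ['x', 0]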
def partitions(iterable):
"""Yield all possible order-preserving partitions of *iterable*.
>>> iterable = 'abc'
>>> for part in partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['a', 'b', 'c']
This is unrelated to :func:`partition`.
"""
sequence = list(iterable)
n = len(sequence)
for i in powerset(range(1, n)):
yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
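# A minimal sketch for exposition (hypothetical helper, not part of the
# library API). Since `partitions` draws its cut points from
# powerset(range(1, n)), a length-n sequence has 2 ** (n - 1)
# order-preserving partitions.
def _demo_partitions_count():
    assert ilen(partitions('abcd')) == 2 ** (4 - 1)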
def set_partitions(iterable, k=None):
"""
Yield the set partitions of *iterable* into *k* parts. Set partitions are
not order-preserving.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable, 2):
... print([''.join(p) for p in part])
['a', 'bc']
['ab', 'c']
['b', 'ac']
If *k* is not given, every set partition is generated.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['b', 'ac']
['a', 'b', 'c']
"""
L = list(iterable)
n = len(L)
if k is not None:
if k < 1:
raise ValueError(
"Can't partition in a negative or zero number of groups"
)
elif k > n:
return
def set_partitions_helper(L, k):
n = len(L)
if k == 1:
yield [L]
elif n == k:
yield [[s] for s in L]
else:
e, *M = L
for p in set_partitions_helper(M, k - 1):
yield [[e], *p]
for p in set_partitions_helper(M, k):
for i in range(len(p)):
yield p[:i] + [[e] + p[i]] + p[i + 1 :]
if k is None:
for k in range(1, n + 1):
yield from set_partitions_helper(L, k)
else:
yield from set_partitions_helper(L, k)
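# A minimal sketch for exposition (hypothetical helper, not part of the
# library API). The recursion above either gives the first element its own
# part or inserts it into each part of a smaller partition, so the counts
# match Stirling numbers of the second kind, e.g. S(4, 2) == 7.
def _demo_set_partitions_count():
    assert ilen(set_partitions('abcd', 2)) == 7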
class time_limited:
"""
Yield items from *iterable* until *limit_seconds* have passed.
If the time limit expires before all items have been yielded, the
``timed_out`` parameter will be set to ``True``.
>>> from time import sleep
>>> def generator():
... yield 1
... yield 2
... sleep(0.2)
... yield 3
>>> iterable = time_limited(0.1, generator())
>>> list(iterable)
[1, 2]
>>> iterable.timed_out
True
Note that the time is checked before each item is yielded, and iteration
stops if the time elapsed is greater than *limit_seconds*. If your time
limit is 1 second, but it takes 2 seconds to generate the first item from
the iterable, the function will run for 2 seconds and not yield anything.
"""
def __init__(self, limit_seconds, iterable):
        if limit_seconds < 0:
            raise ValueError('limit_seconds must be non-negative')
self.limit_seconds = limit_seconds
self._iterable = iter(iterable)
self._start_time = monotonic()
self.timed_out = False
def __iter__(self):
return self
def __next__(self):
item = next(self._iterable)
if monotonic() - self._start_time > self.limit_seconds:
self.timed_out = True
raise StopIteration
return item
def only(iterable, default=None, too_long=None):
"""If *iterable* has only one item, return it.
If it has zero items, return *default*.
If it has more than one item, raise the exception given by *too_long*,
which is ``ValueError`` by default.
>>> only([], default='missing')
'missing'
>>> only([1])
1
>>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 1, 2,
    and perhaps more.
>>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError
Note that :func:`only` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check
iterable contents less destructively.
"""
it = iter(iterable)
first_value = next(it, default)
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
class _IChunk:
def __init__(self, iterable, n):
self._it = islice(iterable, n)
self._cache = deque()
def fill_cache(self):
self._cache.extend(self._it)
def __iter__(self):
return self
def __next__(self):
try:
return next(self._it)
except StopIteration:
if self._cache:
return self._cache.popleft()
else:
raise
def ichunked(iterable, n):
"""Break *iterable* into sub-iterables with *n* elements each.
:func:`ichunked` is like :func:`chunked`, but it yields iterables
instead of lists.
If the sub-iterables are read in order, the elements of *iterable*
won't be stored in memory.
If they are read out of order, :func:`itertools.tee` is used to cache
elements as necessary.
>>> from itertools import count
>>> all_chunks = ichunked(count(), 4)
>>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
>>> list(c_2) # c_1's elements have been cached; c_3's haven't been
[4, 5, 6, 7]
>>> list(c_1)
[0, 1, 2, 3]
>>> list(c_3)
[8, 9, 10, 11]
"""
source = peekable(iter(iterable))
ichunk_marker = object()
while True:
# Check to see whether we're at the end of the source iterable
item = source.peek(ichunk_marker)
if item is ichunk_marker:
return
chunk = _IChunk(source, n)
yield chunk
# Advance the source iterable and fill previous chunk's cache
chunk.fill_cache()
def iequals(*iterables):
"""Return ``True`` if all given *iterables* are equal to each other,
which means that they contain the same elements in the same order.
The function is useful for comparing iterables of different data types
or iterables that do not support equality checks.
>>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc"))
True
>>> iequals("abc", "acb")
False
    Not to be confused with :func:`all_equal`, which checks whether all
elements of iterable are equal to each other.
"""
return all(map(all_equal, zip_longest(*iterables, fillvalue=object())))
def distinct_combinations(iterable, r):
"""Yield the distinct combinations of *r* items taken from *iterable*.
>>> list(distinct_combinations([0, 0, 1], 2))
[(0, 0), (0, 1)]
    Equivalent to ``set(combinations(iterable, r))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
"""
if r < 0:
raise ValueError('r must be non-negative')
elif r == 0:
yield ()
return
pool = tuple(iterable)
generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
current_combo = [None] * r
level = 0
while generators:
try:
cur_idx, p = next(generators[-1])
except StopIteration:
generators.pop()
level -= 1
continue
current_combo[level] = p
if level + 1 == r:
yield tuple(current_combo)
else:
generators.append(
unique_everseen(
enumerate(pool[cur_idx + 1 :], cur_idx + 1),
key=itemgetter(1),
)
)
level += 1
def filter_except(validator, iterable, *exceptions):
"""Yield the items from *iterable* for which the *validator* function does
not raise one of the specified *exceptions*.
*validator* is called for each item in *iterable*.
It should be a function that accepts one argument and raises an exception
if that item is not valid.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(filter_except(int, iterable, ValueError, TypeError))
['1', '2', '4']
If an exception other than one given by *exceptions* is raised by
*validator*, it is raised like normal.
"""
for item in iterable:
try:
validator(item)
except exceptions:
pass
else:
yield item
def map_except(function, iterable, *exceptions):
"""Transform each item from *iterable* with *function* and yield the
result, unless *function* raises one of the specified *exceptions*.
*function* is called to transform each item in *iterable*.
It should accept one argument.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(map_except(int, iterable, ValueError, TypeError))
[1, 2, 4]
If an exception other than one given by *exceptions* is raised by
*function*, it is raised like normal.
"""
for item in iterable:
try:
yield function(item)
except exceptions:
pass
def map_if(iterable, pred, func, func_else=lambda x: x):
"""Evaluate each item from *iterable* using *pred*. If the result is
equivalent to ``True``, transform the item with *func* and yield it.
Otherwise, transform the item with *func_else* and yield it.
*pred*, *func*, and *func_else* should each be functions that accept
one argument. By default, *func_else* is the identity function.
>>> from math import sqrt
>>> iterable = list(range(-5, 5))
>>> iterable
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
>>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig'))
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig']
>>> list(map_if(iterable, lambda x: x >= 0,
... lambda x: f'{sqrt(x):.2f}', lambda x: None))
[None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00']
"""
for item in iterable:
yield func(item) if pred(item) else func_else(item)
def _sample_unweighted(iterable, k):
# Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
# "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
# Fill up the reservoir (collection of samples) with the first `k` samples
reservoir = take(k, iterable)
# Generate random number that's the largest in a sample of k U(0,1) numbers
# Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
W = exp(log(random()) / k)
# The number of elements to skip before changing the reservoir is a random
# number with a geometric distribution. Sample it using random() and logs.
next_index = k + floor(log(random()) / log(1 - W))
for index, element in enumerate(iterable, k):
if index == next_index:
reservoir[randrange(k)] = element
# The new W is the largest in a sample of k U(0, `old_W`) numbers
W *= exp(log(random()) / k)
next_index += floor(log(random()) / log(1 - W)) + 1
return reservoir
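# A minimal sketch for exposition (hypothetical helper, not part of the
# library API; the seed and trial count are arbitrary). Algorithm L keeps a
# k-item reservoir and skips ahead by geometrically distributed gaps, so each
# input item should land in the sample with probability k / n.
def _demo_sample_unweighted():
    from collections import Counter
    from random import seed
    seed(0)
    counts = Counter()
    for _ in range(2000):
        counts.update(_sample_unweighted(iter(range(10)), 5))
    # Each item is expected in about half (1000) of the 2000 samples
    assert all(800 < counts[x] < 1200 for x in range(10))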
def _sample_weighted(iterable, k, weights):
# Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
# "Weighted random sampling with a reservoir".
# Log-transform for numerical stability for weights that are small/large
weight_keys = (log(random()) / weight for weight in weights)
# Fill up the reservoir (collection of samples) with the first `k`
# weight-keys and elements, then heapify the list.
reservoir = take(k, zip(weight_keys, iterable))
heapify(reservoir)
# The number of jumps before changing the reservoir is a random variable
# with an exponential distribution. Sample it using random() and logs.
smallest_weight_key, _ = reservoir[0]
weights_to_skip = log(random()) / smallest_weight_key
for weight, element in zip(weights, iterable):
if weight >= weights_to_skip:
# The notation here is consistent with the paper, but we store
# the weight-keys in log-space for better numerical stability.
smallest_weight_key, _ = reservoir[0]
t_w = exp(weight * smallest_weight_key)
r_2 = uniform(t_w, 1) # generate U(t_w, 1)
weight_key = log(r_2) / weight
heapreplace(reservoir, (weight_key, element))
smallest_weight_key, _ = reservoir[0]
weights_to_skip = log(random()) / smallest_weight_key
else:
weights_to_skip -= weight
# Equivalent to [element for weight_key, element in sorted(reservoir)]
return [heappop(reservoir)[1] for _ in range(k)]
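# A minimal sketch for exposition (hypothetical helper, not part of the
# library API; the seed, weights, and trial count are arbitrary). With
# A-ExpJ, an item's chance of selection grows with its weight, so the heavily
# weighted item 9 should be sampled far more often than item 0.
def _demo_sample_weighted():
    from collections import Counter
    from random import seed
    seed(0)
    counts = Counter()
    for _ in range(500):
        counts.update(_sample_weighted(iter(range(10)), 3, iter(range(1, 11))))
    assert counts[9] > counts[0]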
def sample(iterable, k, weights=None):
"""Return a *k*-length list of elements chosen (without replacement)
from the *iterable*. Like :func:`random.sample`, but works on iterables
of unknown length.
>>> iterable = range(100)
>>> sample(iterable, 5) # doctest: +SKIP
[81, 60, 96, 16, 4]
An iterable with *weights* may also be given:
>>> iterable = range(100)
>>> weights = (i * i + 1 for i in range(100))
>>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP
[79, 67, 74, 66, 78]
The algorithm can also be used to generate weighted random permutations.
The relative weight of each item determines the probability that it
appears late in the permutation.
>>> data = "abcdefgh"
>>> weights = range(1, len(data) + 1)
>>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
"""
if k == 0:
return []
iterable = iter(iterable)
if weights is None:
return _sample_unweighted(iterable, k)
else:
weights = iter(weights)
return _sample_weighted(iterable, k, weights)
def is_sorted(iterable, key=None, reverse=False, strict=False):
"""Returns ``True`` if the items of iterable are in sorted order, and
``False`` otherwise. *key* and *reverse* have the same meaning that they do
in the built-in :func:`sorted` function.
>>> is_sorted(['1', '2', '3', '4', '5'], key=int)
True
>>> is_sorted([5, 4, 3, 1, 2], reverse=True)
False
If *strict*, tests for strict sorting, that is, returns ``False`` if equal
elements are found:
>>> is_sorted([1, 2, 2])
True
>>> is_sorted([1, 2, 2], strict=True)
False
The function returns ``False`` after encountering the first out-of-order
item. If there are no out-of-order items, the iterable is exhausted.
"""
compare = (le if reverse else ge) if strict else (lt if reverse else gt)
it = iterable if key is None else map(key, iterable)
return not any(starmap(compare, pairwise(it)))
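# A minimal sketch for exposition (hypothetical helper, not part of the
# library API). `is_sorted` looks for a single adjacent pair that violates
# the order; with strict=True the comparator flips from gt/lt to ge/le, so an
# equal pair also counts as out of order.
def _demo_is_sorted_strict():
    assert is_sorted([1, 2, 2])
    assert not is_sorted([1, 2, 2], strict=True)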
class AbortThread(BaseException):
pass
class callback_iter:
"""Convert a function that uses callbacks to an iterator.
Let *func* be a function that takes a `callback` keyword argument.
For example:
>>> def func(callback=None):
... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
... if callback:
... callback(i, c)
... return 4
Use ``with callback_iter(func)`` to get an iterator over the parameters
that are delivered to the callback.
>>> with callback_iter(func) as it:
... for args, kwargs in it:
... print(args)
(1, 'a')
(2, 'b')
(3, 'c')
The function will be called in a background thread. The ``done`` property
indicates whether it has completed execution.
>>> it.done
True
If it completes successfully, its return value will be available
in the ``result`` property.
>>> it.result
4
Notes:
* If the function uses some keyword argument besides ``callback``, supply
*callback_kwd*.
* If it finished executing, but raised an exception, accessing the
``result`` property will raise the same exception.
* If it hasn't finished executing, accessing the ``result``
property from within the ``with`` block will raise ``RuntimeError``.
* If it hasn't finished executing, accessing the ``result`` property from
outside the ``with`` block will raise a
``more_itertools.AbortThread`` exception.
* Provide *wait_seconds* to adjust how frequently the it is polled for
output.
"""
def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
self._func = func
self._callback_kwd = callback_kwd
self._aborted = False
self._future = None
self._wait_seconds = wait_seconds
        # Lazily import concurrent.futures
        self._executor = __import__(
            'concurrent.futures'
        ).futures.ThreadPoolExecutor(max_workers=1)
self._iterator = self._reader()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._aborted = True
self._executor.shutdown()
def __iter__(self):
return self
def __next__(self):
return next(self._iterator)
@property
def done(self):
if self._future is None:
return False
return self._future.done()
@property
def result(self):
if not self.done:
raise RuntimeError('Function has not yet completed')
return self._future.result()
def _reader(self):
q = Queue()
def callback(*args, **kwargs):
if self._aborted:
raise AbortThread('canceled by user')
q.put((args, kwargs))
self._future = self._executor.submit(
self._func, **{self._callback_kwd: callback}
)
while True:
try:
item = q.get(timeout=self._wait_seconds)
except Empty:
pass
else:
q.task_done()
yield item
if self._future.done():
break
remaining = []
while True:
try:
item = q.get_nowait()
except Empty:
break
else:
q.task_done()
remaining.append(item)
q.join()
yield from remaining
def windowed_complete(iterable, n):
"""
Yield ``(beginning, middle, end)`` tuples, where:
* Each ``middle`` has *n* items from *iterable*
* Each ``beginning`` has the items before the ones in ``middle``
* Each ``end`` has the items after the ones in ``middle``
>>> iterable = range(7)
>>> n = 3
>>> for beginning, middle, end in windowed_complete(iterable, n):
... print(beginning, middle, end)
() (0, 1, 2) (3, 4, 5, 6)
(0,) (1, 2, 3) (4, 5, 6)
(0, 1) (2, 3, 4) (5, 6)
(0, 1, 2) (3, 4, 5) (6,)
(0, 1, 2, 3) (4, 5, 6) ()
    Note that *n* must be at least 0 and at most equal to the length of
*iterable*.
This function will exhaust the iterable and may require significant
storage.
"""
if n < 0:
raise ValueError('n must be >= 0')
seq = tuple(iterable)
size = len(seq)
if n > size:
raise ValueError('n must be <= len(seq)')
for i in range(size - n + 1):
beginning = seq[:i]
middle = seq[i : i + n]
end = seq[i + n :]
yield beginning, middle, end
def all_unique(iterable, key=None):
"""
Returns ``True`` if all the elements of *iterable* are unique (no two
elements are equal).
>>> all_unique('ABCB')
False
If a *key* function is specified, it will be used to make comparisons.
>>> all_unique('ABCb')
True
>>> all_unique('ABCb', str.lower)
False
The function returns as soon as the first non-unique element is
encountered. Iterables with a mix of hashable and unhashable items can
be used, but the function will be slower for unhashable items.
"""
seenset = set()
seenset_add = seenset.add
seenlist = []
seenlist_add = seenlist.append
for element in map(key, iterable) if key else iterable:
try:
if element in seenset:
return False
seenset_add(element)
except TypeError:
if element in seenlist:
return False
seenlist_add(element)
return True
def nth_product(index, *args):
"""Equivalent to ``list(product(*args))[index]``.
The products of *args* can be ordered lexicographically.
:func:`nth_product` computes the product at sort position *index* without
computing the previous products.
>>> nth_product(8, range(2), range(2), range(2), range(2))
(1, 0, 0, 0)
``IndexError`` will be raised if the given *index* is invalid.
"""
pools = list(map(tuple, reversed(args)))
ns = list(map(len, pools))
c = reduce(mul, ns)
if index < 0:
index += c
if not 0 <= index < c:
raise IndexError
result = []
for pool, n in zip(pools, ns):
result.append(pool[index % n])
index //= n
return tuple(reversed(result))
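# Editor's sketch (not upstream code): a quick self-check that ``nth_product``
# agrees with indexing into the materialized Cartesian product. The helper
# name ``_nth_product_demo`` is hypothetical, for illustration only.
def _nth_product_demo():
    from itertools import product
    args = (range(2),) * 4
    # Index 8 in lexicographic order is binary 1000 -> (1, 0, 0, 0)
    assert nth_product(8, *args) == list(product(*args))[8]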
def nth_permutation(iterable, r, index):
"""Equivalent to ``list(permutations(iterable, r))[index]```
The subsequences of *iterable* that are of length *r* where order is
important can be ordered lexicographically. :func:`nth_permutation`
computes the subsequence at sort position *index* directly, without
computing the previous subsequences.
>>> nth_permutation('ghijk', 2, 5)
('h', 'i')
    ``ValueError`` will be raised if *r* is negative or greater than the length
    of *iterable*.
``IndexError`` will be raised if the given *index* is invalid.
"""
pool = list(iterable)
n = len(pool)
if r is None or r == n:
r, c = n, factorial(n)
elif not 0 <= r < n:
raise ValueError
else:
c = factorial(n) // factorial(n - r)
if index < 0:
index += c
if not 0 <= index < c:
raise IndexError
if c == 0:
return tuple()
result = [0] * r
q = index * factorial(n) // c if r < n else index
for d in range(1, n + 1):
q, i = divmod(q, d)
if 0 <= n - d < r:
result[n - d] = i
if q == 0:
break
return tuple(map(pool.pop, result))
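# Editor's sketch (not upstream code): ``nth_permutation`` should match the
# element at the same index of ``itertools.permutations``. The helper name
# ``_nth_permutation_demo`` is hypothetical.
def _nth_permutation_demo():
    from itertools import permutations
    assert nth_permutation('ghijk', 2, 5) == list(permutations('ghijk', 2))[5]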
def value_chain(*args):
"""Yield all arguments passed to the function in the same order in which
they were passed. If an argument itself is iterable then iterate over its
values.
>>> list(value_chain(1, 2, 3, [4, 5, 6]))
[1, 2, 3, 4, 5, 6]
Binary and text strings are not considered iterable and are emitted
as-is:
>>> list(value_chain('12', '34', ['56', '78']))
['12', '34', '56', '78']
Multiple levels of nesting are not flattened.
"""
for value in args:
if isinstance(value, (str, bytes)):
yield value
continue
try:
yield from value
except TypeError:
yield value
def product_index(element, *args):
"""Equivalent to ``list(product(*args)).index(element)``
The products of *args* can be ordered lexicographically.
:func:`product_index` computes the first index of *element* without
computing the previous products.
>>> product_index([8, 2], range(10), range(5))
42
``ValueError`` will be raised if the given *element* isn't in the product
of *args*.
"""
index = 0
for x, pool in zip_longest(element, args, fillvalue=_marker):
if x is _marker or pool is _marker:
raise ValueError('element is not a product of args')
pool = tuple(pool)
index = index * len(pool) + pool.index(x)
return index
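# Editor's sketch (not upstream code): ``product_index`` is the inverse of
# ``nth_product``, so a round trip should recover the original element.
def _product_index_roundtrip_demo():
    args = (range(10), range(5))
    index = product_index([8, 2], *args)  # 42, as in the doctest above
    assert nth_product(index, *args) == (8, 2)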
def combination_index(element, iterable):
"""Equivalent to ``list(combinations(iterable, r)).index(element)``
The subsequences of *iterable* that are of length *r* can be ordered
lexicographically. :func:`combination_index` computes the index of the
first *element*, without computing the previous combinations.
>>> combination_index('adf', 'abcdefg')
10
``ValueError`` will be raised if the given *element* isn't one of the
combinations of *iterable*.
"""
element = enumerate(element)
k, y = next(element, (None, None))
if k is None:
return 0
indexes = []
pool = enumerate(iterable)
for n, x in pool:
if x == y:
indexes.append(n)
tmp, y = next(element, (None, None))
if tmp is None:
break
else:
k = tmp
else:
raise ValueError('element is not a combination of iterable')
n, _ = last(pool, default=(n, None))
# Python versions below 3.8 don't have math.comb
index = 1
for i, j in enumerate(reversed(indexes), start=1):
j = n - j
if i <= j:
index += factorial(j) // (factorial(i) * factorial(j - i))
return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index
def permutation_index(element, iterable):
"""Equivalent to ``list(permutations(iterable, r)).index(element)```
The subsequences of *iterable* that are of length *r* where order is
important can be ordered lexicographically. :func:`permutation_index`
computes the index of the first *element* directly, without computing
the previous permutations.
>>> permutation_index([1, 3, 2], range(5))
19
``ValueError`` will be raised if the given *element* isn't one of the
permutations of *iterable*.
"""
index = 0
pool = list(iterable)
for i, x in zip(range(len(pool), -1, -1), element):
r = pool.index(x)
index = index * i + r
del pool[r]
return index
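# Editor's sketch (not upstream code): ``permutation_index`` inverts
# ``nth_permutation``, so the round trip should recover the element.
def _permutation_index_roundtrip_demo():
    index = permutation_index([1, 3, 2], range(5))  # 19, as in the doctest
    assert nth_permutation(range(5), 3, index) == (1, 3, 2)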
class countable:
"""Wrap *iterable* and keep a count of how many items have been consumed.
The ``items_seen`` attribute starts at ``0`` and increments as the iterable
is consumed:
>>> iterable = map(str, range(10))
>>> it = countable(iterable)
>>> it.items_seen
0
>>> next(it), next(it)
('0', '1')
>>> list(it)
['2', '3', '4', '5', '6', '7', '8', '9']
>>> it.items_seen
10
"""
def __init__(self, iterable):
self._it = iter(iterable)
self.items_seen = 0
def __iter__(self):
return self
def __next__(self):
item = next(self._it)
self.items_seen += 1
return item
def chunked_even(iterable, n):
"""Break *iterable* into lists of approximately length *n*.
    Items are distributed such that the lengths of the lists differ by at most
    1 item.
>>> iterable = [1, 2, 3, 4, 5, 6, 7]
>>> n = 3
>>> list(chunked_even(iterable, n)) # List lengths: 3, 2, 2
[[1, 2, 3], [4, 5], [6, 7]]
>>> list(chunked(iterable, n)) # List lengths: 3, 3, 1
[[1, 2, 3], [4, 5, 6], [7]]
"""
len_method = getattr(iterable, '__len__', None)
if len_method is None:
return _chunked_even_online(iterable, n)
else:
return _chunked_even_finite(iterable, len_method(), n)
def _chunked_even_online(iterable, n):
buffer = []
maxbuf = n + (n - 2) * (n - 1)
for x in iterable:
buffer.append(x)
if len(buffer) == maxbuf:
yield buffer[:n]
buffer = buffer[n:]
yield from _chunked_even_finite(buffer, len(buffer), n)
def _chunked_even_finite(iterable, N, n):
if N < 1:
return
# Lists are either size `full_size <= n` or `partial_size = full_size - 1`
q, r = divmod(N, n)
num_lists = q + (1 if r > 0 else 0)
q, r = divmod(N, num_lists)
full_size = q + (1 if r > 0 else 0)
partial_size = full_size - 1
num_full = N - partial_size * num_lists
num_partial = num_lists - num_full
buffer = []
iterator = iter(iterable)
# Yield num_full lists of full_size
for x in iterator:
buffer.append(x)
if len(buffer) == full_size:
yield buffer
buffer = []
num_full -= 1
if num_full <= 0:
break
# Yield num_partial lists of partial_size
for x in iterator:
buffer.append(x)
if len(buffer) == partial_size:
yield buffer
buffer = []
num_partial -= 1
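# Editor's sketch (not upstream code): the defining property of
# ``chunked_even`` is that chunk lengths differ by at most one.
def _chunked_even_demo():
    chunks = list(chunked_even(range(10), 4))  # [[0..3], [4..6], [7..9]]
    sizes = [len(chunk) for chunk in chunks]
    assert max(sizes) - min(sizes) <= 1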
def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
"""A version of :func:`zip` that "broadcasts" any scalar
(i.e., non-iterable) items into output tuples.
>>> iterable_1 = [1, 2, 3]
>>> iterable_2 = ['a', 'b', 'c']
>>> scalar = '_'
>>> list(zip_broadcast(iterable_1, iterable_2, scalar))
[(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]
The *scalar_types* keyword argument determines what types are considered
scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to
treat strings and byte strings as iterable:
>>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
[('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]
If the *strict* keyword argument is ``True``, then
``UnequalIterablesError`` will be raised if any of the iterables have
different lengths.
"""
def is_scalar(obj):
if scalar_types and isinstance(obj, scalar_types):
return True
try:
iter(obj)
except TypeError:
return True
else:
return False
size = len(objects)
if not size:
return
iterables, iterable_positions = [], []
scalars, scalar_positions = [], []
for i, obj in enumerate(objects):
if is_scalar(obj):
scalars.append(obj)
scalar_positions.append(i)
else:
iterables.append(iter(obj))
iterable_positions.append(i)
if len(scalars) == size:
yield tuple(objects)
return
zipper = _zip_equal if strict else zip
for item in zipper(*iterables):
new_item = [None] * size
for i, elem in zip(iterable_positions, item):
new_item[i] = elem
for i, elem in zip(scalar_positions, scalars):
new_item[i] = elem
yield tuple(new_item)
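# Editor's sketch (not upstream code): scalars are repeated in every output
# tuple, while iterables are consumed position by position.
def _zip_broadcast_demo():
    assert list(zip_broadcast([1, 2], 0)) == [(1, 0), (2, 0)]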
def unique_in_window(iterable, n, key=None):
"""Yield the items from *iterable* that haven't been seen recently.
*n* is the size of the lookback window.
>>> iterable = [0, 1, 0, 2, 3, 0]
>>> n = 3
>>> list(unique_in_window(iterable, n))
[0, 1, 2, 3, 0]
The *key* function, if provided, will be used to determine uniqueness:
>>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
['a', 'b', 'c', 'd', 'a']
The items in *iterable* must be hashable.
"""
if n <= 0:
raise ValueError('n must be greater than 0')
window = deque(maxlen=n)
uniques = set()
use_key = key is not None
for item in iterable:
k = key(item) if use_key else item
if k in uniques:
continue
if len(uniques) == n:
uniques.discard(window[0])
uniques.add(k)
window.append(k)
yield item
def duplicates_everseen(iterable, key=None):
"""Yield duplicate elements after their first appearance.
>>> list(duplicates_everseen('mississippi'))
['s', 'i', 's', 's', 'i', 'p', 'i']
>>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']
    This function is analogous to :func:`unique_everseen` and is subject to
    the same performance considerations.
"""
seen_set = set()
seen_list = []
use_key = key is not None
for element in iterable:
k = key(element) if use_key else element
try:
if k not in seen_set:
seen_set.add(k)
else:
yield element
except TypeError:
if k not in seen_list:
seen_list.append(k)
else:
yield element
def duplicates_justseen(iterable, key=None):
"""Yields serially-duplicate elements after their first appearance.
>>> list(duplicates_justseen('mississippi'))
['s', 's', 'p']
>>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']
    This function is analogous to :func:`unique_justseen`.
"""
return flatten(
map(
lambda group_tuple: islice_extended(group_tuple[1])[1:],
groupby(iterable, key),
)
)
def minmax(iterable_or_value, *others, key=None, default=_marker):
"""Returns both the smallest and largest items in an iterable
or the largest of two or more arguments.
>>> minmax([3, 1, 5])
(1, 5)
>>> minmax(4, 2, 6)
(2, 6)
If a *key* function is provided, it will be used to transform the input
items for comparison.
>>> minmax([5, 30], key=str) # '30' sorts before '5'
(30, 5)
If a *default* value is provided, it will be returned if there are no
input items.
>>> minmax([], default=(0, 0))
(0, 0)
Otherwise ``ValueError`` is raised.
This function is based on the
`recipe <http://code.activestate.com/recipes/577916/>`__ by
Raymond Hettinger and takes care to minimize the number of comparisons
performed.
"""
iterable = (iterable_or_value, *others) if others else iterable_or_value
it = iter(iterable)
try:
lo = hi = next(it)
except StopIteration as e:
if default is _marker:
raise ValueError(
'`minmax()` argument is an empty iterable. '
'Provide a `default` value to suppress this error.'
) from e
return default
# Different branches depending on the presence of key. This saves a lot
# of unimportant copies which would slow the "key=None" branch
# significantly down.
if key is None:
for x, y in zip_longest(it, it, fillvalue=lo):
if y < x:
x, y = y, x
if x < lo:
lo = x
if hi < y:
hi = y
else:
lo_key = hi_key = key(lo)
for x, y in zip_longest(it, it, fillvalue=lo):
x_key, y_key = key(x), key(y)
if y_key < x_key:
x, y, x_key, y_key = y, x, y_key, x_key
if x_key < lo_key:
lo, lo_key = x, x_key
if hi_key < y_key:
hi, hi_key = y, y_key
return lo, hi
def constrained_batches(
iterable, max_size, max_count=None, get_len=len, strict=True
):
"""Yield batches of items from *iterable* with a combined size limited by
*max_size*.
>>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
>>> list(constrained_batches(iterable, 10))
[(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')]
If a *max_count* is supplied, the number of items per batch is also
limited:
>>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
>>> list(constrained_batches(iterable, 10, max_count = 2))
[(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)]
If a *get_len* function is supplied, use that instead of :func:`len` to
determine item size.
If *strict* is ``True``, raise ``ValueError`` if any single item is bigger
than *max_size*. Otherwise, allow single items to exceed *max_size*.
"""
if max_size <= 0:
raise ValueError('maximum size must be greater than zero')
batch = []
batch_size = 0
batch_count = 0
for item in iterable:
item_len = get_len(item)
if strict and item_len > max_size:
raise ValueError('item size exceeds maximum size')
reached_count = batch_count == max_count
reached_size = item_len + batch_size > max_size
if batch_count and (reached_size or reached_count):
yield tuple(batch)
batch.clear()
batch_size = 0
batch_count = 0
batch.append(item)
batch_size += item_len
batch_count += 1
if batch:
yield tuple(batch)
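# Editor's sketch (not upstream code): every batch emitted by
# ``constrained_batches`` stays within the size budget.
def _constrained_batches_demo():
    data = [b'12345', b'123', b'12345678', b'1']
    for batch in constrained_batches(data, 10):
        assert sum(map(len, batch)) <= 10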
def gray_product(*iterables):
"""Like :func:`itertools.product`, but return tuples in an order such
that only one element in the generated tuple changes from one iteration
to the next.
>>> list(gray_product('AB','CD'))
[('A', 'C'), ('B', 'C'), ('B', 'D'), ('A', 'D')]
This function consumes all of the input iterables before producing output.
If any of the input iterables have fewer than two items, ``ValueError``
is raised.
For information on the algorithm, see
`this section <https://www-cs-faculty.stanford.edu/~knuth/fasc2a.ps.gz>`__
of Donald Knuth's *The Art of Computer Programming*.
"""
all_iterables = tuple(tuple(x) for x in iterables)
iterable_count = len(all_iterables)
for iterable in all_iterables:
if len(iterable) < 2:
raise ValueError("each iterable must have two or more items")
# This is based on "Algorithm H" from section 7.2.1.1, page 20.
# a holds the indexes of the source iterables for the n-tuple to be yielded
# f is the array of "focus pointers"
# o is the array of "directions"
a = [0] * iterable_count
f = list(range(iterable_count + 1))
o = [1] * iterable_count
while True:
yield tuple(all_iterables[i][a[i]] for i in range(iterable_count))
j = f[0]
f[0] = 0
if j == iterable_count:
break
a[j] = a[j] + o[j]
if a[j] == 0 or a[j] == len(all_iterables[j]) - 1:
o[j] = -o[j]
f[j] = f[j + 1]
f[j + 1] = j + 1
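# Editor's sketch (not upstream code): in Gray-code order, consecutive output
# tuples differ in exactly one coordinate.
def _gray_product_demo():
    tuples = list(gray_product('AB', 'CD', 'EF'))
    for prev, curr in zip(tuples, tuples[1:]):
        assert sum(x != y for x, y in zip(prev, curr)) == 1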
| 134,976 | Python | .py | 3,435 | 31.162154 | 92 | 0.572922 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,881 | recipes.pyi | rembo10_headphones/lib/pkg_resources/_vendor/more_itertools/recipes.pyi |
"""Stubs for more_itertools.recipes"""
from __future__ import annotations
from typing import (
Any,
Callable,
Iterable,
Iterator,
overload,
Sequence,
Type,
TypeVar,
)
# Type and type variable definitions
_T = TypeVar('_T')
_U = TypeVar('_U')
def take(n: int, iterable: Iterable[_T]) -> list[_T]: ...
def tabulate(
function: Callable[[int], _T], start: int = ...
) -> Iterator[_T]: ...
def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]: ...
def consume(iterator: Iterable[object], n: int | None = ...) -> None: ...
@overload
def nth(iterable: Iterable[_T], n: int) -> _T | None: ...
@overload
def nth(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
def all_equal(iterable: Iterable[object]) -> bool: ...
def quantify(
iterable: Iterable[_T], pred: Callable[[_T], bool] = ...
) -> int: ...
def pad_none(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
def padnone(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]: ...
def dotproduct(vec1: Iterable[object], vec2: Iterable[object]) -> object: ...
def flatten(listOfLists: Iterable[Iterable[_T]]) -> Iterator[_T]: ...
def repeatfunc(
func: Callable[..., _U], times: int | None = ..., *args: Any
) -> Iterator[_U]: ...
def pairwise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T]]: ...
def grouper(
iterable: Iterable[_T],
n: int,
incomplete: str = ...,
fillvalue: _U = ...,
) -> Iterator[tuple[_T | _U, ...]]: ...
def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ...
def partition(
pred: Callable[[_T], object] | None, iterable: Iterable[_T]
) -> tuple[Iterator[_T], Iterator[_T]]: ...
def powerset(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
def unique_everseen(
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
) -> Iterator[_T]: ...
def unique_justseen(
iterable: Iterable[_T], key: Callable[[_T], object] | None = ...
) -> Iterator[_T]: ...
@overload
def iter_except(
func: Callable[[], _T],
exception: Type[BaseException] | tuple[Type[BaseException], ...],
first: None = ...,
) -> Iterator[_T]: ...
@overload
def iter_except(
func: Callable[[], _T],
exception: Type[BaseException] | tuple[Type[BaseException], ...],
first: Callable[[], _U],
) -> Iterator[_T | _U]: ...
@overload
def first_true(
iterable: Iterable[_T], *, pred: Callable[[_T], object] | None = ...
) -> _T | None: ...
@overload
def first_true(
iterable: Iterable[_T],
default: _U,
pred: Callable[[_T], object] | None = ...,
) -> _T | _U: ...
def random_product(
*args: Iterable[_T], repeat: int = ...
) -> tuple[_T, ...]: ...
def random_permutation(
iterable: Iterable[_T], r: int | None = ...
) -> tuple[_T, ...]: ...
def random_combination(iterable: Iterable[_T], r: int) -> tuple[_T, ...]: ...
def random_combination_with_replacement(
iterable: Iterable[_T], r: int
) -> tuple[_T, ...]: ...
def nth_combination(
iterable: Iterable[_T], r: int, index: int
) -> tuple[_T, ...]: ...
def prepend(value: _T, iterator: Iterable[_U]) -> Iterator[_T | _U]: ...
def convolve(signal: Iterable[_T], kernel: Iterable[_T]) -> Iterator[_T]: ...
def before_and_after(
predicate: Callable[[_T], bool], it: Iterable[_T]
) -> tuple[Iterator[_T], Iterator[_T]]: ...
def triplewise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T, _T]]: ...
def sliding_window(
iterable: Iterable[_T], n: int
) -> Iterator[tuple[_T, ...]]: ...
def subslices(iterable: Iterable[_T]) -> Iterator[list[_T]]: ...
def polynomial_from_roots(roots: Sequence[int]) -> list[int]: ...
def iter_index(
iterable: Iterable[object],
value: Any,
start: int | None = ...,
) -> Iterator[int]: ...
def sieve(n: int) -> Iterator[int]: ...
def batched(
iterable: Iterable[_T],
n: int,
) -> Iterator[list[_T]]: ...
def transpose(
it: Iterable[Iterable[_T]],
) -> tuple[Iterator[_T], ...]: ...
def matmul(m1: Sequence[_T], m2: Sequence[_T]) -> Iterator[list[_T]]: ...
def factor(n: int) -> Iterator[int]: ...
| 4,056 | Python | .py | 116 | 32.560345 | 77 | 0.601219 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,882 | __init__.py | rembo10_headphones/lib/pkg_resources/_vendor/more_itertools/__init__.py |
"""More routines for operating on iterables, beyond itertools"""
from .more import * # noqa
from .recipes import * # noqa
__version__ = '9.1.0'
| 148 | Python | .py | 4 | 35.5 | 64 | 0.697183 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,883 | recipes.py | rembo10_headphones/lib/pkg_resources/_vendor/more_itertools/recipes.py |
"""Imported from the recipes section of the itertools documentation.
All functions taken from the recipes section of the itertools library docs
[1]_.
Some backward-compatible usability improvements have been made.
.. [1] http://docs.python.org/library/itertools.html#recipes
"""
import math
import operator
import warnings
from collections import deque
from collections.abc import Sized
from functools import reduce
from itertools import (
chain,
combinations,
compress,
count,
cycle,
groupby,
islice,
product,
repeat,
starmap,
tee,
zip_longest,
)
from random import randrange, sample, choice
from sys import hexversion
__all__ = [
'all_equal',
'batched',
'before_and_after',
'consume',
'convolve',
'dotproduct',
'first_true',
'factor',
'flatten',
'grouper',
'iter_except',
'iter_index',
'matmul',
'ncycles',
'nth',
'nth_combination',
'padnone',
'pad_none',
'pairwise',
'partition',
'polynomial_from_roots',
'powerset',
'prepend',
'quantify',
'random_combination_with_replacement',
'random_combination',
'random_permutation',
'random_product',
'repeatfunc',
'roundrobin',
'sieve',
'sliding_window',
'subslices',
'tabulate',
'tail',
'take',
'transpose',
'triplewise',
'unique_everseen',
'unique_justseen',
]
_marker = object()
def take(n, iterable):
"""Return first *n* items of the iterable as a list.
>>> take(3, range(10))
[0, 1, 2]
If there are fewer than *n* items in the iterable, all of them are
returned.
>>> take(10, range(3))
[0, 1, 2]
"""
return list(islice(iterable, n))
def tabulate(function, start=0):
"""Return an iterator over the results of ``func(start)``,
``func(start + 1)``, ``func(start + 2)``...
*func* should be a function that accepts one integer argument.
If *start* is not specified it defaults to 0. It will be incremented each
time the iterator is advanced.
>>> square = lambda x: x ** 2
>>> iterator = tabulate(square, -3)
>>> take(4, iterator)
[9, 4, 1, 0]
"""
return map(function, count(start))
def tail(n, iterable):
"""Return an iterator over the last *n* items of *iterable*.
>>> t = tail(3, 'ABCDEFG')
>>> list(t)
['E', 'F', 'G']
"""
# If the given iterable has a length, then we can use islice to get its
# final elements. Note that if the iterable is not actually Iterable,
# either islice or deque will throw a TypeError. This is why we don't
# check if it is Iterable.
if isinstance(iterable, Sized):
yield from islice(iterable, max(0, len(iterable) - n), None)
else:
yield from iter(deque(iterable, maxlen=n))
def consume(iterator, n=None):
"""Advance *iterable* by *n* steps. If *n* is ``None``, consume it
entirely.
Efficiently exhausts an iterator without returning values. Defaults to
consuming the whole iterator, but an optional second argument may be
provided to limit consumption.
>>> i = (x for x in range(10))
>>> next(i)
0
>>> consume(i, 3)
>>> next(i)
4
>>> consume(i)
>>> next(i)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
If the iterator has fewer items remaining than the provided limit, the
whole iterator will be consumed.
>>> i = (x for x in range(3))
>>> consume(i, 5)
>>> next(i)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(islice(iterator, n, n), None)
def nth(iterable, n, default=None):
"""Returns the nth item or a default value.
>>> l = range(10)
>>> nth(l, 3)
3
>>> nth(l, 20, "zebra")
'zebra'
"""
return next(islice(iterable, n, None), default)
def all_equal(iterable):
"""
Returns ``True`` if all the elements are equal to each other.
>>> all_equal('aaaa')
True
>>> all_equal('aaab')
False
"""
g = groupby(iterable)
return next(g, True) and not next(g, False)
def quantify(iterable, pred=bool):
"""Return the how many times the predicate is true.
>>> quantify([True, False, True])
2
"""
return sum(map(pred, iterable))
def pad_none(iterable):
"""Returns the sequence of elements and then returns ``None`` indefinitely.
>>> take(5, pad_none(range(3)))
[0, 1, 2, None, None]
Useful for emulating the behavior of the built-in :func:`map` function.
See also :func:`padded`.
"""
return chain(iterable, repeat(None))
padnone = pad_none
def ncycles(iterable, n):
"""Returns the sequence elements *n* times
>>> list(ncycles(["a", "b"], 3))
['a', 'b', 'a', 'b', 'a', 'b']
"""
return chain.from_iterable(repeat(tuple(iterable), n))
def dotproduct(vec1, vec2):
"""Returns the dot product of the two iterables.
>>> dotproduct([10, 10], [20, 20])
400
"""
return sum(map(operator.mul, vec1, vec2))
def flatten(listOfLists):
"""Return an iterator flattening one level of nesting in a list of lists.
>>> list(flatten([[0, 1], [2, 3]]))
[0, 1, 2, 3]
See also :func:`collapse`, which can flatten multiple levels of nesting.
"""
return chain.from_iterable(listOfLists)
def repeatfunc(func, times=None, *args):
"""Call *func* with *args* repeatedly, returning an iterable over the
results.
If *times* is specified, the iterable will terminate after that many
repetitions:
>>> from operator import add
>>> times = 4
>>> args = 3, 5
>>> list(repeatfunc(add, times, *args))
[8, 8, 8, 8]
If *times* is ``None`` the iterable will not terminate:
>>> from random import randrange
>>> times = None
>>> args = 1, 11
>>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
[2, 4, 8, 1, 8, 4]
"""
if times is None:
return starmap(func, repeat(args))
return starmap(func, repeat(args, times))
def _pairwise(iterable):
"""Returns an iterator of paired items, overlapping, from the original
>>> take(4, pairwise(count()))
[(0, 1), (1, 2), (2, 3), (3, 4)]
On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
"""
a, b = tee(iterable)
next(b, None)
yield from zip(a, b)
try:
from itertools import pairwise as itertools_pairwise
except ImportError:
pairwise = _pairwise
else:
def pairwise(iterable):
yield from itertools_pairwise(iterable)
pairwise.__doc__ = _pairwise.__doc__
class UnequalIterablesError(ValueError):
def __init__(self, details=None):
msg = 'Iterables have different lengths'
if details is not None:
msg += (': index 0 has length {}; index {} has length {}').format(
*details
)
super().__init__(msg)
def _zip_equal_generator(iterables):
for combo in zip_longest(*iterables, fillvalue=_marker):
for val in combo:
if val is _marker:
raise UnequalIterablesError()
yield combo
def _zip_equal(*iterables):
# Check whether the iterables are all the same size.
try:
first_size = len(iterables[0])
for i, it in enumerate(iterables[1:], 1):
size = len(it)
if size != first_size:
break
else:
# If we didn't break out, we can use the built-in zip.
return zip(*iterables)
# If we did break out, there was a mismatch.
raise UnequalIterablesError(details=(first_size, i, size))
# If any one of the iterables didn't have a length, start reading
# them until one runs out.
except TypeError:
return _zip_equal_generator(iterables)
def grouper(iterable, n, incomplete='fill', fillvalue=None):
"""Group elements from *iterable* into fixed-length groups of length *n*.
>>> list(grouper('ABCDEF', 3))
[('A', 'B', 'C'), ('D', 'E', 'F')]
The keyword arguments *incomplete* and *fillvalue* control what happens for
iterables whose length is not a multiple of *n*.
When *incomplete* is `'fill'`, the last group will contain instances of
*fillvalue*.
>>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x'))
[('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
When *incomplete* is `'ignore'`, the last group will not be emitted.
>>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x'))
[('A', 'B', 'C'), ('D', 'E', 'F')]
When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised.
>>> it = grouper('ABCDEFG', 3, incomplete='strict')
>>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnequalIterablesError
"""
args = [iter(iterable)] * n
if incomplete == 'fill':
return zip_longest(*args, fillvalue=fillvalue)
if incomplete == 'strict':
return _zip_equal(*args)
if incomplete == 'ignore':
return zip(*args)
else:
raise ValueError('Expected fill, strict, or ignore')
def roundrobin(*iterables):
"""Yields an item from each iterable, alternating between them.
>>> list(roundrobin('ABC', 'D', 'EF'))
['A', 'D', 'E', 'B', 'F', 'C']
This function produces the same output as :func:`interleave_longest`, but
may perform better for some inputs (in particular when the number of
iterables is small).
"""
# Recipe credited to George Sakkis
pending = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
def partition(pred, iterable):
"""
Returns a 2-tuple of iterables derived from the input iterable.
The first yields the items that have ``pred(item) == False``.
The second yields the items that have ``pred(item) == True``.
>>> is_odd = lambda x: x % 2 != 0
>>> iterable = range(10)
>>> even_items, odd_items = partition(is_odd, iterable)
>>> list(even_items), list(odd_items)
([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
If *pred* is None, :func:`bool` is used.
>>> iterable = [0, 1, False, True, '', ' ']
>>> false_items, true_items = partition(None, iterable)
>>> list(false_items), list(true_items)
([0, False, ''], [1, True, ' '])
"""
if pred is None:
pred = bool
evaluations = ((pred(x), x) for x in iterable)
t1, t2 = tee(evaluations)
return (
(x for (cond, x) in t1 if not cond),
(x for (cond, x) in t2 if cond),
)
def powerset(iterable):
"""Yields all possible subsets of the iterable.
>>> list(powerset([1, 2, 3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
:func:`powerset` will operate on iterables that aren't :class:`set`
instances, so repeated elements in the input will produce repeated elements
in the output. Use :func:`unique_everseen` on the input to avoid generating
duplicates:
>>> seq = [1, 1, 0]
>>> list(powerset(seq))
[(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
>>> from more_itertools import unique_everseen
>>> list(powerset(unique_everseen(seq)))
[(), (1,), (0,), (1, 0)]
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
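# Editor's sketch (not upstream code): a set of n elements has 2 ** n subsets,
# which is exactly what ``powerset`` yields.
def _powerset_demo():
    assert len(list(powerset(range(4)))) == 2 ** 4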
def unique_everseen(iterable, key=None):
"""
Yield unique elements, preserving order.
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
Sequences with a mix of hashable and unhashable items can be used.
The function will be slower (i.e., `O(n^2)`) for unhashable items.
Remember that ``list`` objects are unhashable - you can use the *key*
parameter to transform the list to a tuple (which is hashable) to
avoid a slowdown.
>>> iterable = ([1, 2], [2, 3], [1, 2])
>>> list(unique_everseen(iterable)) # Slow
[[1, 2], [2, 3]]
>>> list(unique_everseen(iterable, key=tuple)) # Faster
[[1, 2], [2, 3]]
    Similarly, you may want to convert unhashable ``set`` objects with
``key=frozenset``. For ``dict`` objects,
``key=lambda x: frozenset(x.items())`` can be used.
"""
seenset = set()
seenset_add = seenset.add
seenlist = []
seenlist_add = seenlist.append
use_key = key is not None
for element in iterable:
k = key(element) if use_key else element
try:
if k not in seenset:
seenset_add(k)
yield element
except TypeError:
if k not in seenlist:
seenlist_add(k)
yield element
def unique_justseen(iterable, key=None):
"""Yields elements in order, ignoring serial duplicates
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
def iter_except(func, exception, first=None):
"""Yields results from a function repeatedly until an exception is raised.
Converts a call-until-exception interface to an iterator interface.
Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
to end the loop.
>>> l = [0, 1, 2]
>>> list(iter_except(l.pop, IndexError))
[2, 1, 0]
Multiple exceptions can be specified as a stopping condition:
>>> l = [1, 2, 3, '...', 4, 5, 6]
>>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
[7, 6, 5]
>>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
[4, 3, 2]
>>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
[]
"""
try:
if first is not None:
yield first()
while 1:
yield func()
except exception:
pass
def first_true(iterable, default=None, pred=None):
"""
Returns the first true value in the iterable.
If no true value is found, returns *default*
If *pred* is not None, returns the first item for which
``pred(item) == True`` .
>>> first_true(range(10))
1
>>> first_true(range(10), pred=lambda x: x > 5)
6
>>> first_true(range(10), default='missing', pred=lambda x: x > 9)
'missing'
"""
return next(filter(pred, iterable), default)
def random_product(*args, repeat=1):
"""Draw an item at random from each of the input iterables.
>>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
('c', 3, 'Z')
If *repeat* is provided as a keyword argument, that many items will be
drawn from each iterable.
>>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
('a', 2, 'd', 3)
    This is equivalent to taking a random selection from
    ``itertools.product(*args, repeat=repeat)``.
"""
pools = [tuple(pool) for pool in args] * repeat
return tuple(choice(pool) for pool in pools)
def random_permutation(iterable, r=None):
"""Return a random *r* length permutation of the elements in *iterable*.
If *r* is not specified or is ``None``, then *r* defaults to the length of
*iterable*.
>>> random_permutation(range(5)) # doctest:+SKIP
(3, 4, 0, 1, 2)
    This is equivalent to taking a random selection from
    ``itertools.permutations(iterable, r)``.
"""
pool = tuple(iterable)
r = len(pool) if r is None else r
return tuple(sample(pool, r))
def random_combination(iterable, r):
"""Return a random *r* length subsequence of the elements in *iterable*.
>>> random_combination(range(5), 3) # doctest:+SKIP
(2, 3, 4)
    This is equivalent to taking a random selection from
    ``itertools.combinations(iterable, r)``.
"""
pool = tuple(iterable)
n = len(pool)
indices = sorted(sample(range(n), r))
return tuple(pool[i] for i in indices)
def random_combination_with_replacement(iterable, r):
"""Return a random *r* length subsequence of elements in *iterable*,
allowing individual elements to be repeated.
>>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
(0, 0, 1, 2, 2)
    This is equivalent to taking a random selection from
    ``itertools.combinations_with_replacement(iterable, r)``.
"""
pool = tuple(iterable)
n = len(pool)
indices = sorted(randrange(n) for i in range(r))
return tuple(pool[i] for i in indices)
def nth_combination(iterable, r, index):
"""Equivalent to ``list(combinations(iterable, r))[index]``.
The subsequences of *iterable* that are of length *r* can be ordered
lexicographically. :func:`nth_combination` computes the subsequence at
sort position *index* directly, without computing the previous
subsequences.
>>> nth_combination(range(5), 3, 5)
(0, 3, 4)
    ``ValueError`` will be raised if *r* is negative or greater than the length
    of *iterable*.
``IndexError`` will be raised if the given *index* is invalid.
"""
pool = tuple(iterable)
n = len(pool)
if (r < 0) or (r > n):
raise ValueError
c = 1
k = min(r, n - r)
for i in range(1, k + 1):
c = c * (n - k + i) // i
if index < 0:
index += c
if (index < 0) or (index >= c):
raise IndexError
result = []
while r:
c, n, r = c * r // n, n - 1, r - 1
while index >= c:
index -= c
c, n = c * (n - r) // n, n - 1
result.append(pool[-1 - n])
return tuple(result)
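# Editor's sketch (not upstream code): ``nth_combination`` should agree with
# indexing into the materialized combinations.
def _nth_combination_demo():
    assert nth_combination(range(5), 3, 5) == list(
        combinations(range(5), 3)
    )[5]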
def prepend(value, iterator):
"""Yield *value*, followed by the elements in *iterator*.
>>> value = '0'
>>> iterator = ['1', '2', '3']
>>> list(prepend(value, iterator))
['0', '1', '2', '3']
To prepend multiple values, see :func:`itertools.chain`
or :func:`value_chain`.
"""
return chain([value], iterator)
def convolve(signal, kernel):
"""Convolve the iterable *signal* with the iterable *kernel*.
>>> signal = (1, 2, 3, 4, 5)
>>> kernel = [3, 2, 1]
>>> list(convolve(signal, kernel))
[3, 8, 14, 20, 26, 14, 5]
Note: the input arguments are not interchangeable, as the *kernel*
is immediately consumed and stored.
"""
kernel = tuple(kernel)[::-1]
n = len(kernel)
window = deque([0], maxlen=n) * n
for x in chain(signal, repeat(0, n - 1)):
window.append(x)
yield sum(map(operator.mul, kernel, window))
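# Editor's sketch (not upstream code): convolving with kernel [1, 1] yields
# running pairwise sums, padded at both ends.
def _convolve_demo():
    assert list(convolve([1, 2, 3], [1, 1])) == [1, 3, 5, 3]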
def before_and_after(predicate, it):
"""A variant of :func:`takewhile` that allows complete access to the
remainder of the iterator.
>>> it = iter('ABCdEfGhI')
>>> all_upper, remainder = before_and_after(str.isupper, it)
>>> ''.join(all_upper)
'ABC'
>>> ''.join(remainder) # takewhile() would lose the 'd'
'dEfGhI'
Note that the first iterator must be fully consumed before the second
iterator can generate valid results.
"""
it = iter(it)
transition = []
def true_iterator():
for elem in it:
if predicate(elem):
yield elem
else:
transition.append(elem)
return
# Note: this is different from itertools recipes to allow nesting
# before_and_after remainders into before_and_after again. See tests
# for an example.
remainder_iterator = chain(transition, it)
return true_iterator(), remainder_iterator
def triplewise(iterable):
"""Return overlapping triplets from *iterable*.
>>> list(triplewise('ABCDE'))
[('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
"""
for (a, _), (b, c) in pairwise(pairwise(iterable)):
yield a, b, c
def sliding_window(iterable, n):
"""Return a sliding window of width *n* over *iterable*.
>>> list(sliding_window(range(6), 4))
[(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
If *iterable* has fewer than *n* items, then nothing is yielded:
>>> list(sliding_window(range(3), 4))
[]
For a variant with more features, see :func:`windowed`.
"""
it = iter(iterable)
window = deque(islice(it, n), maxlen=n)
if len(window) == n:
yield tuple(window)
for x in it:
window.append(x)
yield tuple(window)
def subslices(iterable):
"""Return all contiguous non-empty subslices of *iterable*.
>>> list(subslices('ABC'))
[['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']]
This is similar to :func:`substrings`, but emits items in a different
order.
"""
seq = list(iterable)
slices = starmap(slice, combinations(range(len(seq) + 1), 2))
return map(operator.getitem, repeat(seq), slices)
def polynomial_from_roots(roots):
"""Compute a polynomial's coefficients from its roots.
>>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3)
>>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60
[1, -4, -17, 60]
"""
# Use math.prod for Python 3.8+,
prod = getattr(math, 'prod', lambda x: reduce(operator.mul, x, 1))
roots = list(map(operator.neg, roots))
return [
sum(map(prod, combinations(roots, k))) for k in range(len(roots) + 1)
]
def iter_index(iterable, value, start=0):
"""Yield the index of each place in *iterable* that *value* occurs,
beginning with index *start*.
See :func:`locate` for a more general means of finding the indexes
associated with particular values.
>>> list(iter_index('AABCADEAF', 'A'))
[0, 1, 4, 7]
"""
try:
seq_index = iterable.index
except AttributeError:
# Slow path for general iterables
it = islice(iterable, start, None)
for i, element in enumerate(it, start):
if element is value or element == value:
yield i
else:
# Fast path for sequences
i = start - 1
try:
while True:
i = seq_index(value, i + 1)
yield i
except ValueError:
pass
def sieve(n):
"""Yield the primes less than n.
>>> list(sieve(30))
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
"""
isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
data = bytearray((0, 1)) * (n // 2)
data[:3] = 0, 0, 0
limit = isqrt(n) + 1
for p in compress(range(limit), data):
data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p)))
data[2] = 1
return iter_index(data, 1) if n > 2 else iter([])
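# Editor's sketch (not upstream code): sanity check against the known primes
# below 10.
def _sieve_demo():
    assert list(sieve(10)) == [2, 3, 5, 7]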
def batched(iterable, n):
"""Batch data into lists of length *n*. The last batch may be shorter.
>>> list(batched('ABCDEFG', 3))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
This recipe is from the ``itertools`` docs. This library also provides
:func:`chunked`, which has a different implementation.
"""
if hexversion >= 0x30C00A0: # Python 3.12.0a0
warnings.warn(
(
'batched will be removed in a future version of '
'more-itertools. Use the standard library '
'itertools.batched function instead'
),
DeprecationWarning,
)
it = iter(iterable)
while True:
batch = list(islice(it, n))
if not batch:
break
yield batch
def transpose(it):
"""Swap the rows and columns of the input.
>>> list(transpose([(1, 2, 3), (11, 22, 33)]))
[(1, 11), (2, 22), (3, 33)]
The caller should ensure that the dimensions of the input are compatible.
"""
    # TODO: when 3.9 goes end-of-life, add strict=True to this.
return zip(*it)
def matmul(m1, m2):
"""Multiply two matrices.
>>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)]))
[[49, 80], [41, 60]]
The caller should ensure that the dimensions of the input matrices are
compatible with each other.
"""
n = len(m2[0])
return batched(starmap(dotproduct, product(m1, transpose(m2))), n)
def factor(n):
"""Yield the prime factors of n.
>>> list(factor(360))
[2, 2, 2, 3, 3, 5]
"""
isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
for prime in sieve(isqrt(n) + 1):
while True:
quotient, remainder = divmod(n, prime)
if remainder:
break
yield prime
n = quotient
if n == 1:
return
if n >= 2:
yield n
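# Editor's sketch (not upstream code): multiplying the factors back together
# recovers the original number.
def _factor_demo():
    assert list(factor(2023)) == [7, 17, 17]  # 7 * 17 * 17 == 2023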
| 25,416 | Python | .py | 701 | 29.569187 | 79 | 0.594299 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,884 | __main__.py | rembo10_headphones/lib/pkg_resources/_vendor/platformdirs/__main__.py |
from __future__ import annotations
from platformdirs import PlatformDirs, __version__
PROPS = (
"user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"user_documents_dir",
"user_runtime_dir",
"site_data_dir",
"site_config_dir",
)
def main() -> None:
app_name = "MyApp"
app_author = "MyCompany"
print(f"-- platformdirs {__version__} --")
print("-- app dirs (with optional 'version')")
dirs = PlatformDirs(app_name, app_author, version="1.0")
for prop in PROPS:
print(f"{prop}: {getattr(dirs, prop)}")
print("\n-- app dirs (without optional 'version')")
dirs = PlatformDirs(app_name, app_author)
for prop in PROPS:
print(f"{prop}: {getattr(dirs, prop)}")
print("\n-- app dirs (without optional 'appauthor')")
dirs = PlatformDirs(app_name)
for prop in PROPS:
print(f"{prop}: {getattr(dirs, prop)}")
print("\n-- app dirs (with disabled 'appauthor')")
dirs = PlatformDirs(app_name, appauthor=False)
for prop in PROPS:
print(f"{prop}: {getattr(dirs, prop)}")
if __name__ == "__main__":
main()
| 1,164 | Python | .py | 35 | 28.171429 | 60 | 0.618068 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,885 | windows.py | rembo10_headphones/lib/pkg_resources/_vendor/platformdirs/windows.py |
from __future__ import annotations
import ctypes
import os
import sys
from functools import lru_cache
from typing import Callable
from .api import PlatformDirsABC
class Windows(PlatformDirsABC):
"""`MSDN on where to store app data files
<http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120>`_.
Makes use of the
`appname <platformdirs.api.PlatformDirsABC.appname>`,
`appauthor <platformdirs.api.PlatformDirsABC.appauthor>`,
`version <platformdirs.api.PlatformDirsABC.version>`,
`roaming <platformdirs.api.PlatformDirsABC.roaming>`,
`opinion <platformdirs.api.PlatformDirsABC.opinion>`."""
@property
def user_data_dir(self) -> str:
"""
:return: data directory tied to the user, e.g.
``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or
``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming)
"""
const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(get_win_folder(const))
return self._append_parts(path)
def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str:
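        # Editor's note: builds <appauthor>\<appname>[\<opinion_value>][\<version>]
        # under *path*; appauthor falls back to appname unless disabled with False.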
params = []
if self.appname:
if self.appauthor is not False:
author = self.appauthor or self.appname
params.append(author)
params.append(self.appname)
if opinion_value is not None and self.opinion:
params.append(opinion_value)
if self.version:
params.append(self.version)
return os.path.join(path, *params)
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``"""
path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
return self._append_parts(path)
@property
def user_config_dir(self) -> str:
""":return: config directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, same as `site_data_dir`"""
return self.site_data_dir
@property
def user_cache_dir(self) -> str:
"""
:return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g.
``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``
"""
path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA"))
return self._append_parts(path, opinion_value="Cache")
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it
"""
path = self.user_data_dir
if self.opinion:
path = os.path.join(path, "Logs")
return path
@property
def user_documents_dir(self) -> str:
"""
:return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents``
"""
return os.path.normpath(get_win_folder("CSIDL_PERSONAL"))
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, e.g.
``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname``
"""
path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp"))
return self._append_parts(path)
def get_win_folder_from_env_vars(csidl_name: str) -> str:
"""Get folder from environment variables."""
if csidl_name == "CSIDL_PERSONAL": # does not have an environment name
return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents")
env_var_name = {
"CSIDL_APPDATA": "APPDATA",
"CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE",
"CSIDL_LOCAL_APPDATA": "LOCALAPPDATA",
}.get(csidl_name)
if env_var_name is None:
raise ValueError(f"Unknown CSIDL name: {csidl_name}")
result = os.environ.get(env_var_name)
if result is None:
raise ValueError(f"Unset environment variable: {env_var_name}")
return result
def get_win_folder_from_registry(csidl_name: str) -> str:
"""Get folder from the registry.
This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
"CSIDL_PERSONAL": "Personal",
}.get(csidl_name)
if shell_folder_name is None:
raise ValueError(f"Unknown CSIDL name: {csidl_name}")
if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows
raise NotImplementedError
import winreg
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
directory, _ = winreg.QueryValueEx(key, shell_folder_name)
return str(directory)
def get_win_folder_via_ctypes(csidl_name: str) -> str:
"""Get folder with ctypes."""
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
"CSIDL_PERSONAL": 5,
}.get(csidl_name)
if csidl_const is None:
raise ValueError(f"Unknown CSIDL name: {csidl_name}")
buf = ctypes.create_unicode_buffer(1024)
windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker
windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if it has highbit chars.
if any(ord(c) > 255 for c in buf):
buf2 = ctypes.create_unicode_buffer(1024)
if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _pick_get_win_folder() -> Callable[[str], str]:
if hasattr(ctypes, "windll"):
return get_win_folder_via_ctypes
try:
import winreg # noqa: F401
except ImportError:
return get_win_folder_from_env_vars
else:
return get_win_folder_from_registry
get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder())
__all__ = [
"Windows",
]
| 6,596 | Python | .py | 153 | 35.947712 | 119 | 0.649875 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,886 | macos.py | rembo10_headphones/lib/pkg_resources/_vendor/platformdirs/macos.py |
from __future__ import annotations
import os
from .api import PlatformDirsABC
class MacOS(PlatformDirsABC):
"""
Platform directories for the macOS operating system. Follows the guidance from `Apple documentation
<https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_.
Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>` and
`version <platformdirs.api.PlatformDirsABC.version>`.
"""
@property
def user_data_dir(self) -> str:
""":return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support/"))
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``"""
return self._append_app_name_and_version("/Library/Application Support")
@property
def user_config_dir(self) -> str:
""":return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Preferences/"))
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``"""
return self._append_app_name_and_version("/Library/Preferences")
@property
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches"))
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
""":return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs"))
@property
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user, e.g. ``~/Documents``"""
return os.path.expanduser("~/Documents")
@property
def user_runtime_dir(self) -> str:
""":return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems"))
__all__ = [
"MacOS",
]
| 2,655 | Python | .py | 49 | 47.897959 | 160 | 0.683134 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,887 | android.py | rembo10_headphones/lib/pkg_resources/_vendor/platformdirs/android.py |
from __future__ import annotations
import os
import re
import sys
from functools import lru_cache
from typing import cast
from .api import PlatformDirsABC
class Android(PlatformDirsABC):
"""
Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_. Makes use of the
`appname <platformdirs.api.PlatformDirsABC.appname>` and
`version <platformdirs.api.PlatformDirsABC.version>`.
"""
@property
def user_data_dir(self) -> str:
""":return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
return self._append_app_name_and_version(cast(str, _android_folder()), "files")
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_config_dir(self) -> str:
"""
:return: config directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
"""
return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, same as `user_config_dir`"""
return self.user_config_dir
@property
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user, e.g. e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>``"""
return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "log")
return path
@property
def user_documents_dir(self) -> str:
"""
:return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``
"""
return _android_documents_folder()
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "tmp")
return path
@lru_cache(maxsize=1)
def _android_folder() -> str | None:
""":return: base folder for the Android OS or None if cannot be found"""
try:
# First try to get path to android app via pyjnius
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
except Exception:
        # if that fails, look for an Android-style files path on sys.path
pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
for path in sys.path:
if pattern.match(path):
result = path.split("/files")[0]
break
else:
result = None
return result
@lru_cache(maxsize=1)
def _android_documents_folder() -> str:
""":return: documents folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
Environment = autoclass("android.os.Environment") # noqa: N806
documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
except Exception:
documents_dir = "/storage/emulated/0/Documents"
return documents_dir
__all__ = [
"Android",
]
| 4,068 | Python | .py | 98 | 34.408163 | 120 | 0.636525 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,888 | api.py | rembo10_headphones/lib/pkg_resources/_vendor/platformdirs/api.py |
from __future__ import annotations
import os
import sys
from abc import ABC, abstractmethod
from pathlib import Path
if sys.version_info >= (3, 8): # pragma: no branch
from typing import Literal # pragma: no cover
class PlatformDirsABC(ABC):
"""
Abstract base class for platform directories.
"""
def __init__(
self,
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
multipath: bool = False,
opinion: bool = True,
):
"""
Create a new platform directory.
:param appname: See `appname`.
:param appauthor: See `appauthor`.
:param version: See `version`.
:param roaming: See `roaming`.
:param multipath: See `multipath`.
:param opinion: See `opinion`.
"""
self.appname = appname #: The name of application.
self.appauthor = appauthor
"""
The name of the app author or distributing body for this application. Typically, it is the owning company name.
Defaults to `appname`. You may pass ``False`` to disable it.
"""
self.version = version
"""
An optional version path element to append to the path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this would typically be ``<major>.<minor>``.
"""
self.roaming = roaming
"""
Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup
for roaming profiles, this user data will be synced on login (see
`here <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_).
"""
self.multipath = multipath
"""
An optional parameter only applicable to Unix/Linux which indicates that the entire list of data dirs should be
        returned. By default, only the first item is returned.
"""
        self.opinion = opinion  #: A flag indicating whether to use opinionated values.
def _append_app_name_and_version(self, *base: str) -> str:
params = list(base[1:])
if self.appname:
params.append(self.appname)
if self.version:
params.append(self.version)
return os.path.join(base[0], *params)
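    # For example, with a hypothetical appname "MyApp" and version "1.0",
    # _append_app_name_and_version("/base") yields "/base/MyApp/1.0"; any
    # extra positional components after the first base are joined in before
    # the app name.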
@property
@abstractmethod
def user_data_dir(self) -> str:
""":return: data directory tied to the user"""
@property
@abstractmethod
def site_data_dir(self) -> str:
""":return: data directory shared by users"""
@property
@abstractmethod
def user_config_dir(self) -> str:
""":return: config directory tied to the user"""
@property
@abstractmethod
def site_config_dir(self) -> str:
""":return: config directory shared by the users"""
@property
@abstractmethod
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user"""
@property
@abstractmethod
def user_state_dir(self) -> str:
""":return: state directory tied to the user"""
@property
@abstractmethod
def user_log_dir(self) -> str:
""":return: log directory tied to the user"""
@property
@abstractmethod
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user"""
@property
@abstractmethod
def user_runtime_dir(self) -> str:
""":return: runtime directory tied to the user"""
@property
def user_data_path(self) -> Path:
""":return: data path tied to the user"""
return Path(self.user_data_dir)
@property
def site_data_path(self) -> Path:
""":return: data path shared by users"""
return Path(self.site_data_dir)
@property
def user_config_path(self) -> Path:
""":return: config path tied to the user"""
return Path(self.user_config_dir)
@property
def site_config_path(self) -> Path:
""":return: config path shared by the users"""
return Path(self.site_config_dir)
@property
def user_cache_path(self) -> Path:
""":return: cache path tied to the user"""
return Path(self.user_cache_dir)
@property
def user_state_path(self) -> Path:
""":return: state path tied to the user"""
return Path(self.user_state_dir)
@property
def user_log_path(self) -> Path:
""":return: log path tied to the user"""
return Path(self.user_log_dir)
@property
def user_documents_path(self) -> Path:
""":return: documents path tied to the user"""
return Path(self.user_documents_dir)
@property
def user_runtime_path(self) -> Path:
""":return: runtime path tied to the user"""
return Path(self.user_runtime_dir)
| 4,910
|
Python
|
.py
| 131
| 30.183206
| 120
| 0.625578
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,889
|
unix.py
|
rembo10_headphones/lib/pkg_resources/_vendor/platformdirs/unix.py
|
from __future__ import annotations
import os
import sys
from configparser import ConfigParser
from pathlib import Path
from .api import PlatformDirsABC
if sys.platform.startswith("linux"): # pragma: no branch # no op check, only to please the type checker
from os import getuid
else:
def getuid() -> int:
raise RuntimeError("should only be used on Linux")
class Unix(PlatformDirsABC):
"""
On Unix/Linux, we follow the
`XDG Basedir Spec <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_. The spec allows
    overriding directories with environment variables. The examples shown are the default values, alongside the name of
the environment variable that overrides them. Makes use of the
`appname <platformdirs.api.PlatformDirsABC.appname>`,
`version <platformdirs.api.PlatformDirsABC.version>`,
`multipath <platformdirs.api.PlatformDirsABC.multipath>`,
`opinion <platformdirs.api.PlatformDirsABC.opinion>`.
"""
@property
def user_data_dir(self) -> str:
"""
:return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or
``$XDG_DATA_HOME/$appname/$version``
"""
path = os.environ.get("XDG_DATA_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.local/share")
return self._append_app_name_and_version(path)
@property
def site_data_dir(self) -> str:
"""
        :return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is
         enabled and ``XDG_DATA_DIRS`` contains multiple paths, the response is also a multi path separated by the
         OS path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``
"""
# XDG default for $XDG_DATA_DIRS; only first, if multipath is False
path = os.environ.get("XDG_DATA_DIRS", "")
if not path.strip():
path = f"/usr/local/share{os.pathsep}/usr/share"
return self._with_multi_path(path)
def _with_multi_path(self, path: str) -> str:
path_list = path.split(os.pathsep)
if not self.multipath:
path_list = path_list[0:1]
path_list = [self._append_app_name_and_version(os.path.expanduser(p)) for p in path_list]
return os.pathsep.join(path_list)
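    # For example, with XDG_DATA_DIRS="/usr/local/share:/usr/share" and a
    # hypothetical appname "MyApp": multipath=False yields
    # "/usr/local/share/MyApp", while multipath=True yields
    # "/usr/local/share/MyApp:/usr/share/MyApp".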
@property
def user_config_dir(self) -> str:
"""
:return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or
``$XDG_CONFIG_HOME/$appname/$version``
"""
path = os.environ.get("XDG_CONFIG_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.config")
return self._append_app_name_and_version(path)
@property
def site_config_dir(self) -> str:
"""
        :return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>`
         is enabled and ``XDG_CONFIG_DIRS`` contains multiple paths, the response is also a multi path separated by
         the OS path separator), e.g. ``/etc/xdg/$appname/$version``
"""
        # XDG default for $XDG_CONFIG_DIRS; only first, if multipath is False
path = os.environ.get("XDG_CONFIG_DIRS", "")
if not path.strip():
path = "/etc/xdg"
return self._with_multi_path(path)
@property
def user_cache_dir(self) -> str:
"""
:return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or
         ``$XDG_CACHE_HOME/$appname/$version``
"""
path = os.environ.get("XDG_CACHE_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.cache")
return self._append_app_name_and_version(path)
@property
def user_state_dir(self) -> str:
"""
:return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or
``$XDG_STATE_HOME/$appname/$version``
"""
path = os.environ.get("XDG_STATE_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.local/state")
return self._append_app_name_and_version(path)
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it
"""
path = self.user_state_dir
if self.opinion:
path = os.path.join(path, "log")
return path
@property
def user_documents_dir(self) -> str:
"""
:return: documents directory tied to the user, e.g. ``~/Documents``
"""
documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR")
if documents_dir is None:
documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip()
if not documents_dir:
documents_dir = os.path.expanduser("~/Documents")
return documents_dir
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or
``$XDG_RUNTIME_DIR/$appname/$version``
"""
path = os.environ.get("XDG_RUNTIME_DIR", "")
if not path.strip():
path = f"/run/user/{getuid()}"
return self._append_app_name_and_version(path)
@property
def site_data_path(self) -> Path:
""":return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``"""
return self._first_item_as_path_if_multipath(self.site_data_dir)
@property
def site_config_path(self) -> Path:
""":return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``"""
return self._first_item_as_path_if_multipath(self.site_config_dir)
def _first_item_as_path_if_multipath(self, directory: str) -> Path:
if self.multipath:
# If multipath is True, the first path is returned.
directory = directory.split(os.pathsep)[0]
return Path(directory)
def _get_user_dirs_folder(key: str) -> str | None:
"""Return directory from user-dirs.dirs config file. See https://freedesktop.org/wiki/Software/xdg-user-dirs/"""
user_dirs_config_path = os.path.join(Unix().user_config_dir, "user-dirs.dirs")
if os.path.exists(user_dirs_config_path):
parser = ConfigParser()
with open(user_dirs_config_path) as stream:
# Add fake section header, so ConfigParser doesn't complain
parser.read_string(f"[top]\n{stream.read()}")
if key not in parser["top"]:
return None
path = parser["top"][key].strip('"')
# Handle relative home paths
path = path.replace("$HOME", os.path.expanduser("~"))
return path
return None
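# A compact sketch of the fake-section trick above, fed from an in-memory
# string instead of the real ~/.config/user-dirs.dirs file:
if __name__ == "__main__":
    _parser = ConfigParser()
    _parser.read_string('[top]\nXDG_DOCUMENTS_DIR="$HOME/Documents"\n')
    _raw = _parser["top"]["XDG_DOCUMENTS_DIR"].strip('"')
    print(_raw.replace("$HOME", os.path.expanduser("~")))  # e.g. /home/user/Documents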
__all__ = [
"Unix",
]
| 6,911
|
Python
|
.py
| 153
| 37.156863
| 120
| 0.623477
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,890
|
__init__.py
|
rembo10_headphones/lib/pkg_resources/_vendor/platformdirs/__init__.py
|
"""
Utilities for determining application-specific dirs. See <https://github.com/platformdirs/platformdirs> for details and
usage.
"""
from __future__ import annotations
import os
import sys
from pathlib import Path
if sys.version_info >= (3, 8): # pragma: no cover (py38+)
from typing import Literal
else: # pragma: no cover (py38+)
from ..typing_extensions import Literal
from .api import PlatformDirsABC
from .version import __version__
from .version import __version_tuple__ as __version_info__
def _set_platform_dir_class() -> type[PlatformDirsABC]:
if sys.platform == "win32":
from .windows import Windows as Result
elif sys.platform == "darwin":
from .macos import MacOS as Result
else:
from .unix import Unix as Result
if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":
if os.getenv("SHELL") or os.getenv("PREFIX"):
return Result
from .android import _android_folder
if _android_folder() is not None:
from .android import Android
return Android # return to avoid redefinition of result
return Result
PlatformDirs = _set_platform_dir_class() #: Currently active platform
AppDirs = PlatformDirs #: Backwards compatibility with appdirs
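# A small sketch of the dispatch above: on a plain desktop host the selected
# class matches the OS family, and every convenience wrapper below routes
# through it ("MyApp"/"MyCompany" are hypothetical names):
if __name__ == "__main__":
    _dirs = PlatformDirs(appname="MyApp", appauthor="MyCompany")
    print(type(_dirs).__name__, _dirs.user_data_dir)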
def user_data_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: data directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_dir
def site_data_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: data directory shared by users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_dir
def user_config_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: config directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_dir
def site_config_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: config directory shared by the users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_dir
def user_cache_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: cache directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_dir
def user_state_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: state directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_dir
def user_log_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: log directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_dir
def user_documents_dir() -> str:
"""
:returns: documents directory tied to the user
"""
return PlatformDirs().user_documents_dir
def user_runtime_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: runtime directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_dir
def user_data_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: data path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_path
def site_data_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: data path shared by users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_path
def user_config_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: config path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_path
def site_config_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: config path shared by the users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_path
def user_cache_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: cache path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_path
def user_state_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: state path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_path
def user_log_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: log path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_path
def user_documents_path() -> Path:
"""
:returns: documents path tied to the user
"""
return PlatformDirs().user_documents_path
def user_runtime_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: runtime path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_path
__all__ = [
"__version__",
"__version_info__",
"PlatformDirs",
"AppDirs",
"PlatformDirsABC",
"user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"user_documents_dir",
"user_runtime_dir",
"site_data_dir",
"site_config_dir",
"user_data_path",
"user_config_path",
"user_cache_path",
"user_state_path",
"user_log_path",
"user_documents_path",
"user_runtime_path",
"site_data_path",
"site_config_path",
]
| 12,806
|
Python
|
.py
| 291
| 39.381443
| 119
| 0.71181
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,891
|
version.py
|
rembo10_headphones/lib/pkg_resources/_vendor/platformdirs/version.py
|
# file generated by setuptools_scm
# don't change, don't track in version control
__version__ = version = '2.6.2'
__version_tuple__ = version_tuple = (2, 6, 2)
| 160
|
Python
|
.py
| 4
| 39
| 46
| 0.685897
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,892
|
_itertools.py
|
rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/_itertools.py
|
from itertools import filterfalse
from typing import (
Callable,
Iterable,
Iterator,
Optional,
Set,
TypeVar,
Union,
)
# Type and type variable definitions
_T = TypeVar('_T')
_U = TypeVar('_U')
def unique_everseen(
iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
) -> Iterator[_T]:
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen: Set[Union[_T, _U]] = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
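# Two quick sketches of the helper above: order of first appearance is kept,
# and the optional key collapses case variants.
if __name__ == "__main__":
    print(list(unique_everseen("AAAABBBCCDAABBB")))     # ['A', 'B', 'C', 'D']
    print(list(unique_everseen("ABBCcAD", str.lower)))  # ['A', 'B', 'C', 'D']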
| 884
|
Python
|
.py
| 31
| 22.354839
| 78
| 0.60424
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,893
|
_compat.py
|
rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/_compat.py
|
# flake8: noqa
import abc
import os
import sys
import pathlib
from contextlib import suppress
from typing import Union
if sys.version_info >= (3, 10):
from zipfile import Path as ZipPath # type: ignore
else:
from ..zipp import Path as ZipPath # type: ignore
try:
from typing import runtime_checkable # type: ignore
except ImportError:
def runtime_checkable(cls): # type: ignore
return cls
try:
from typing import Protocol # type: ignore
except ImportError:
Protocol = abc.ABC # type: ignore
class TraversableResourcesLoader:
"""
Adapt loaders to provide TraversableResources and other
compatibility.
Used primarily for Python 3.9 and earlier where the native
loaders do not yet implement TraversableResources.
"""
def __init__(self, spec):
self.spec = spec
@property
def path(self):
return self.spec.origin
def get_resource_reader(self, name):
from . import readers, _adapters
def _zip_reader(spec):
with suppress(AttributeError):
return readers.ZipReader(spec.loader, spec.name)
def _namespace_reader(spec):
with suppress(AttributeError, ValueError):
return readers.NamespaceReader(spec.submodule_search_locations)
def _available_reader(spec):
with suppress(AttributeError):
return spec.loader.get_resource_reader(spec.name)
def _native_reader(spec):
reader = _available_reader(spec)
return reader if hasattr(reader, 'files') else None
def _file_reader(spec):
try:
path = pathlib.Path(self.path)
except TypeError:
return None
if path.exists():
return readers.FileReader(self)
return (
# native reader if it supplies 'files'
_native_reader(self.spec)
or
# local ZipReader if a zip module
_zip_reader(self.spec)
or
# local NamespaceReader if a namespace module
_namespace_reader(self.spec)
or
# local FileReader
_file_reader(self.spec)
# fallback - adapt the spec ResourceReader to TraversableReader
or _adapters.CompatibilityFiles(self.spec)
)
def wrap_spec(package):
"""
Construct a package spec with traversable compatibility
on the spec/loader/reader.
Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
from above for older Python compatibility (<3.10).
"""
from . import _adapters
return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
if sys.version_info >= (3, 9):
StrPath = Union[str, os.PathLike[str]]
else:
# PathLike is only subscriptable at runtime in 3.9+
StrPath = Union[str, "os.PathLike[str]"]
| 2,925
|
Python
|
.py
| 82
| 27.719512
| 84
| 0.651047
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,894
|
_legacy.py
|
rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/_legacy.py
|
import functools
import os
import pathlib
import types
import warnings
from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
from . import _common
Package = Union[types.ModuleType, str]
Resource = str
def deprecated(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(
f"{func.__name__} is deprecated. Use files() instead. "
"Refer to https://importlib-resources.readthedocs.io"
"/en/latest/using.html#migrating-from-legacy for migration advice.",
DeprecationWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return wrapper
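# A minimal sketch of the decorator above on a stand-in function (_old is
# hypothetical): the call still goes through, with a DeprecationWarning.
if __name__ == "__main__":
    @deprecated
    def _old():
        return 42
    with warnings.catch_warnings(record=True) as _caught:
        warnings.simplefilter("always")
        assert _old() == 42
    print(_caught[0].category.__name__)  # DeprecationWarning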
def normalize_path(path: Any) -> str:
"""Normalize a path by ensuring it is a string.
If the resulting string contains path separators, an exception is raised.
"""
str_path = str(path)
parent, file_name = os.path.split(str_path)
if parent:
raise ValueError(f'{path!r} must be only a file name')
return file_name
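# For example: a bare file name passes through, while anything carrying a
# directory part is rejected.
if __name__ == "__main__":
    print(normalize_path("data.json"))  # data.json
    try:
        normalize_path("subdir/data.json")
    except ValueError as exc:
        print(exc)  # 'subdir/data.json' must be only a file name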
@deprecated
def open_binary(package: Package, resource: Resource) -> BinaryIO:
"""Return a file-like object opened for binary reading of the resource."""
return (_common.files(package) / normalize_path(resource)).open('rb')
@deprecated
def read_binary(package: Package, resource: Resource) -> bytes:
"""Return the binary contents of the resource."""
return (_common.files(package) / normalize_path(resource)).read_bytes()
@deprecated
def open_text(
package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict',
) -> TextIO:
"""Return a file-like object opened for text reading of the resource."""
return (_common.files(package) / normalize_path(resource)).open(
'r', encoding=encoding, errors=errors
)
@deprecated
def read_text(
package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict',
) -> str:
"""Return the decoded string of the resource.
The decoding-related arguments have the same semantics as those of
bytes.decode().
"""
with open_text(package, resource, encoding, errors) as fp:
return fp.read()
@deprecated
def contents(package: Package) -> Iterable[str]:
"""Return an iterable of entries in `package`.
Note that not all entries are resources. Specifically, directories are
not considered resources. Use `is_resource()` on each entry returned here
to check if it is a resource or not.
"""
return [path.name for path in _common.files(package).iterdir()]
@deprecated
def is_resource(package: Package, name: str) -> bool:
"""True if `name` is a resource inside `package`.
Directories are *not* resources.
"""
resource = normalize_path(name)
return any(
traversable.name == resource and traversable.is_file()
for traversable in _common.files(package).iterdir()
)
@deprecated
def path(
package: Package,
resource: Resource,
) -> ContextManager[pathlib.Path]:
"""A context manager providing a file path object to the resource.
If the resource does not already exist on its own on the file system,
a temporary file will be created. If the file was created, the file
will be deleted upon exiting the context manager (no exception is
raised if the file was deleted prior to the context manager
exiting).
"""
return _common.as_file(_common.files(package) / normalize_path(resource))
| 3,481
|
Python
|
.py
| 93
| 32.569892
| 80
| 0.697709
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,895
|
abc.py
|
rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/abc.py
|
import abc
import io
import itertools
import pathlib
from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional
from ._compat import runtime_checkable, Protocol, StrPath
__all__ = ["ResourceReader", "Traversable", "TraversableResources"]
class ResourceReader(metaclass=abc.ABCMeta):
"""Abstract base class for loaders to provide resource reading support."""
@abc.abstractmethod
def open_resource(self, resource: Text) -> BinaryIO:
"""Return an opened, file-like object for binary reading.
The 'resource' argument is expected to represent only a file name.
If the resource cannot be found, FileNotFoundError is raised.
"""
# This deliberately raises FileNotFoundError instead of
# NotImplementedError so that if this method is accidentally called,
# it'll still do the right thing.
raise FileNotFoundError
@abc.abstractmethod
def resource_path(self, resource: Text) -> Text:
"""Return the file system path to the specified resource.
The 'resource' argument is expected to represent only a file name.
If the resource does not exist on the file system, raise
FileNotFoundError.
"""
# This deliberately raises FileNotFoundError instead of
# NotImplementedError so that if this method is accidentally called,
# it'll still do the right thing.
raise FileNotFoundError
@abc.abstractmethod
def is_resource(self, path: Text) -> bool:
"""Return True if the named 'path' is a resource.
Files are resources, directories are not.
"""
raise FileNotFoundError
@abc.abstractmethod
def contents(self) -> Iterable[str]:
"""Return an iterable of entries in `package`."""
raise FileNotFoundError
class TraversalError(Exception):
pass
@runtime_checkable
class Traversable(Protocol):
"""
An object with a subset of pathlib.Path methods suitable for
traversing directories and opening files.
Any exceptions that occur when accessing the backing resource
may propagate unaltered.
"""
@abc.abstractmethod
def iterdir(self) -> Iterator["Traversable"]:
"""
Yield Traversable objects in self
"""
def read_bytes(self) -> bytes:
"""
Read contents of self as bytes
"""
with self.open('rb') as strm:
return strm.read()
def read_text(self, encoding: Optional[str] = None) -> str:
"""
Read contents of self as text
"""
with self.open(encoding=encoding) as strm:
return strm.read()
@abc.abstractmethod
def is_dir(self) -> bool:
"""
Return True if self is a directory
"""
@abc.abstractmethod
def is_file(self) -> bool:
"""
Return True if self is a file
"""
def joinpath(self, *descendants: StrPath) -> "Traversable":
"""
Return Traversable resolved with any descendants applied.
Each descendant should be a path segment relative to self
and each may contain multiple levels separated by
``posixpath.sep`` (``/``).
"""
if not descendants:
return self
names = itertools.chain.from_iterable(
path.parts for path in map(pathlib.PurePosixPath, descendants)
)
target = next(names)
matches = (
traversable for traversable in self.iterdir() if traversable.name == target
)
try:
match = next(matches)
except StopIteration:
raise TraversalError(
"Target not found during traversal.", target, list(names)
)
return match.joinpath(*names)
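    # For example, root.joinpath("a/b", "c") walks the same segments as
    # root / "a" / "b" / "c": each descendant may bundle several
    # posixpath.sep-separated levels, and a missing segment raises
    # TraversalError.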
def __truediv__(self, child: StrPath) -> "Traversable":
"""
Return Traversable child in self
"""
return self.joinpath(child)
@abc.abstractmethod
def open(self, mode='r', *args, **kwargs):
"""
mode may be 'r' or 'rb' to open as text or binary. Return a handle
suitable for reading (same as pathlib.Path.open).
When opening as text, accepts encoding parameters such as those
accepted by io.TextIOWrapper.
"""
@property
@abc.abstractmethod
def name(self) -> str:
"""
The base name of this object without any parent references.
"""
class TraversableResources(ResourceReader):
"""
The required interface for providing traversable
resources.
"""
@abc.abstractmethod
def files(self) -> "Traversable":
"""Return a Traversable object for the loaded package."""
def open_resource(self, resource: StrPath) -> io.BufferedReader:
return self.files().joinpath(resource).open('rb')
def resource_path(self, resource: Any) -> NoReturn:
raise FileNotFoundError(resource)
def is_resource(self, path: StrPath) -> bool:
return self.files().joinpath(path).is_file()
def contents(self) -> Iterator[str]:
return (item.name for item in self.files().iterdir())
| 5,140
|
Python
|
.py
| 135
| 30.444444
| 87
| 0.649899
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,896
|
simple.py
|
rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/simple.py
|
"""
Interface adapters for low-level readers.
"""
import abc
import io
import itertools
from typing import BinaryIO, List
from .abc import Traversable, TraversableResources
class SimpleReader(abc.ABC):
"""
The minimum, low-level interface required from a resource
provider.
"""
@property
@abc.abstractmethod
def package(self) -> str:
"""
The name of the package for which this reader loads resources.
"""
@abc.abstractmethod
def children(self) -> List['SimpleReader']:
"""
Obtain an iterable of SimpleReader for available
child containers (e.g. directories).
"""
@abc.abstractmethod
def resources(self) -> List[str]:
"""
Obtain available named resources for this virtual package.
"""
@abc.abstractmethod
def open_binary(self, resource: str) -> BinaryIO:
"""
Obtain a File-like for a named resource.
"""
@property
def name(self):
return self.package.split('.')[-1]
class ResourceContainer(Traversable):
"""
Traversable container for a package's resources via its reader.
"""
def __init__(self, reader: SimpleReader):
self.reader = reader
def is_dir(self):
return True
def is_file(self):
return False
def iterdir(self):
        files = (ResourceHandle(self, name) for name in self.reader.resources())
dirs = map(ResourceContainer, self.reader.children())
return itertools.chain(files, dirs)
def open(self, *args, **kwargs):
raise IsADirectoryError()
class ResourceHandle(Traversable):
"""
Handle to a named resource in a ResourceReader.
"""
def __init__(self, parent: ResourceContainer, name: str):
self.parent = parent
self.name = name # type: ignore
def is_file(self):
return True
def is_dir(self):
return False
    def open(self, mode='r', *args, **kwargs):
        stream = self.parent.reader.open_binary(self.name)
        if 'b' not in mode:
            # wrap the binary stream for text reading; the remaining
            # arguments mirror io.TextIOWrapper (encoding, errors, ...)
            stream = io.TextIOWrapper(stream, *args, **kwargs)
        return stream
def joinpath(self, name):
raise RuntimeError("Cannot traverse into a resource")
class TraversableReader(TraversableResources, SimpleReader):
"""
A TraversableResources based on SimpleReader. Resource providers
may derive from this class to provide the TraversableResources
interface by supplying the SimpleReader interface.
"""
def files(self):
return ResourceContainer(self)
| 2,576
|
Python
|
.py
| 80
| 25.875
| 78
| 0.660729
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,897
|
__init__.py
|
rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/__init__.py
|
"""Read resources contained within a package."""
from ._common import (
as_file,
files,
Package,
)
from ._legacy import (
contents,
open_binary,
read_binary,
open_text,
read_text,
is_resource,
path,
Resource,
)
from .abc import ResourceReader
__all__ = [
'Package',
'Resource',
'ResourceReader',
'as_file',
'contents',
'files',
'is_resource',
'open_binary',
'open_text',
'path',
'read_binary',
'read_text',
]
| 506
|
Python
|
.py
| 31
| 12.193548
| 48
| 0.595745
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,898
|
_common.py
|
rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/_common.py
|
import os
import pathlib
import tempfile
import functools
import contextlib
import types
import importlib
import inspect
import warnings
import itertools
from typing import Union, Optional, cast
from .abc import ResourceReader, Traversable
from ._compat import wrap_spec
Package = Union[types.ModuleType, str]
Anchor = Package
def package_to_anchor(func):
"""
Replace 'package' parameter as 'anchor' and warn about the change.
Other errors should fall through.
>>> files('a', 'b')
Traceback (most recent call last):
TypeError: files() takes from 0 to 1 positional arguments but 2 were given
"""
undefined = object()
@functools.wraps(func)
def wrapper(anchor=undefined, package=undefined):
if package is not undefined:
if anchor is not undefined:
return func(anchor, package)
warnings.warn(
"First parameter to files is renamed to 'anchor'",
DeprecationWarning,
stacklevel=2,
)
return func(package)
elif anchor is undefined:
return func()
return func(anchor)
return wrapper
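# A minimal sketch of the shim above on a stand-in function (_demo is
# hypothetical): the new positional spelling passes straight through, while
# the legacy ``package=`` keyword still resolves but warns.
if __name__ == "__main__":
    @package_to_anchor
    def _demo(anchor=None):
        return anchor
    assert _demo("pkg") == "pkg"
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        assert _demo(package="pkg") == "pkg"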
@package_to_anchor
def files(anchor: Optional[Anchor] = None) -> Traversable:
"""
Get a Traversable resource for an anchor.
"""
return from_package(resolve(anchor))
def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
"""
Return the package's loader if it's a ResourceReader.
"""
    # We can't use an issubclass() check here because apparently abc's
    # __subclasscheck__() hook wants to create a weak reference to the
    # object, but
# zipimport.zipimporter does not support weak references, resulting in a
# TypeError. That seems terrible.
spec = package.__spec__
reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore
if reader is None:
return None
return reader(spec.name) # type: ignore
@functools.singledispatch
def resolve(cand: Optional[Anchor]) -> types.ModuleType:
return cast(types.ModuleType, cand)
@resolve.register
def _(cand: str) -> types.ModuleType:
return importlib.import_module(cand)
@resolve.register
def _(cand: None) -> types.ModuleType:
return resolve(_infer_caller().f_globals['__name__'])
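# For example: resolve("importlib") imports and returns that module,
# resolve(some_module) returns it unchanged, and resolve(None) falls back to
# the caller's own module.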
def _infer_caller():
"""
Walk the stack and find the frame of the first caller not in this module.
"""
def is_this_file(frame_info):
return frame_info.filename == __file__
def is_wrapper(frame_info):
return frame_info.function == 'wrapper'
not_this_file = itertools.filterfalse(is_this_file, inspect.stack())
# also exclude 'wrapper' due to singledispatch in the call stack
callers = itertools.filterfalse(is_wrapper, not_this_file)
return next(callers).frame
def from_package(package: types.ModuleType):
"""
Return a Traversable object for the given package.
"""
spec = wrap_spec(package)
reader = spec.loader.get_resource_reader(spec.name)
return reader.files()
@contextlib.contextmanager
def _tempfile(
reader,
suffix='',
# gh-93353: Keep a reference to call os.remove() in late Python
# finalization.
*,
_os_remove=os.remove,
):
# Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
# blocks due to the need to close the temporary file to work on Windows
# properly.
fd, raw_path = tempfile.mkstemp(suffix=suffix)
try:
try:
os.write(fd, reader())
finally:
os.close(fd)
del reader
yield pathlib.Path(raw_path)
finally:
try:
_os_remove(raw_path)
except FileNotFoundError:
pass
def _temp_file(path):
return _tempfile(path.read_bytes, suffix=path.name)
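# A brief sketch of the helpers above: materialize in-memory bytes as a real
# filesystem path for the duration of the context (the payload is
# hypothetical).
if __name__ == "__main__":
    with _tempfile(lambda: b"payload", suffix=".bin") as _tmp:
        print(_tmp.suffix, _tmp.read_bytes())  # .bin b'payload'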
def _is_present_dir(path: Traversable) -> bool:
"""
Some Traversables implement ``is_dir()`` to raise an
exception (i.e. ``FileNotFoundError``) when the
directory doesn't exist. This function wraps that call
to always return a boolean and only return True
if there's a dir and it exists.
"""
with contextlib.suppress(FileNotFoundError):
return path.is_dir()
return False
@functools.singledispatch
def as_file(path):
"""
Given a Traversable object, return that object as a
path on the local file system in a context manager.
"""
return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)
@as_file.register(pathlib.Path)
@contextlib.contextmanager
def _(path):
"""
Degenerate behavior for pathlib.Path objects.
"""
yield path
@contextlib.contextmanager
def _temp_path(dir: tempfile.TemporaryDirectory):
"""
    Wrap tempfile.TemporaryDirectory to return a pathlib object.
"""
with dir as result:
yield pathlib.Path(result)
@contextlib.contextmanager
def _temp_dir(path):
"""
Given a traversable dir, recursively replicate the whole tree
to the file system in a context manager.
"""
assert path.is_dir()
with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
yield _write_contents(temp_dir, path)
def _write_contents(target, source):
child = target.joinpath(source.name)
if source.is_dir():
child.mkdir()
for item in source.iterdir():
_write_contents(child, item)
else:
child.write_bytes(source.read_bytes())
return child
| 5,457
|
Python
|
.py
| 164
| 27.817073
| 79
| 0.684571
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,899
|
readers.py
|
rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/readers.py
|
import collections
import pathlib
import operator
from . import abc
from ._itertools import unique_everseen
from ._compat import ZipPath
def remove_duplicates(items):
return iter(collections.OrderedDict.fromkeys(items))
class FileReader(abc.TraversableResources):
def __init__(self, loader):
self.path = pathlib.Path(loader.path).parent
def resource_path(self, resource):
"""
Return the file system path to prevent
`resources.path()` from creating a temporary
copy.
"""
return str(self.path.joinpath(resource))
def files(self):
return self.path
class ZipReader(abc.TraversableResources):
def __init__(self, loader, module):
_, _, name = module.rpartition('.')
self.prefix = loader.prefix.replace('\\', '/') + name + '/'
self.archive = loader.archive
def open_resource(self, resource):
try:
return super().open_resource(resource)
except KeyError as exc:
raise FileNotFoundError(exc.args[0])
def is_resource(self, path):
# workaround for `zipfile.Path.is_file` returning true
# for non-existent paths.
target = self.files().joinpath(path)
return target.is_file() and target.exists()
def files(self):
return ZipPath(self.archive, self.prefix)
class MultiplexedPath(abc.Traversable):
"""
Given a series of Traversable objects, implement a merged
version of the interface across all objects. Useful for
namespace packages which may be multihomed at a single
name.
"""
def __init__(self, *paths):
self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
if not self._paths:
message = 'MultiplexedPath must contain at least one path'
raise FileNotFoundError(message)
if not all(path.is_dir() for path in self._paths):
raise NotADirectoryError('MultiplexedPath only supports directories')
def iterdir(self):
files = (file for path in self._paths for file in path.iterdir())
return unique_everseen(files, key=operator.attrgetter('name'))
def read_bytes(self):
raise FileNotFoundError(f'{self} is not a file')
def read_text(self, *args, **kwargs):
raise FileNotFoundError(f'{self} is not a file')
def is_dir(self):
return True
def is_file(self):
return False
def joinpath(self, *descendants):
try:
return super().joinpath(*descendants)
except abc.TraversalError:
# One of the paths did not resolve (a directory does not exist).
# Just return something that will not exist.
return self._paths[0].joinpath(*descendants)
def open(self, *args, **kwargs):
raise FileNotFoundError(f'{self} is not a file')
@property
def name(self):
return self._paths[0].name
def __repr__(self):
paths = ', '.join(f"'{path}'" for path in self._paths)
return f'MultiplexedPath({paths})'
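# A compact sketch of the class above, merging two throwaway directories
# created just for the demo; children of both parents appear once each:
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as _a, tempfile.TemporaryDirectory() as _b:
        pathlib.Path(_a, "x.txt").write_text("from a")
        pathlib.Path(_b, "y.txt").write_text("from b")
        _merged = MultiplexedPath(_a, _b)
        print(sorted(child.name for child in _merged.iterdir()))  # ['x.txt', 'y.txt']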
class NamespaceReader(abc.TraversableResources):
def __init__(self, namespace_path):
if 'NamespacePath' not in str(namespace_path):
raise ValueError('Invalid path')
self.path = MultiplexedPath(*list(namespace_path))
def resource_path(self, resource):
"""
Return the file system path to prevent
`resources.path()` from creating a temporary
copy.
"""
return str(self.path.joinpath(resource))
def files(self):
return self.path
| 3,581
|
Python
|
.py
| 91
| 31.791209
| 81
| 0.652413
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|