code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
|---|---|---|---|---|---|
handlers = HANDLERS['id_token']
# Select only the relevant section of the claims request.
claims_request_section = claims_request.get('id_token', {}) if claims_request else {}
scope_request = provider.scope.to_names(access_token.scope)
if nonce:
claims_request_section.update({'nonce': {'value': nonce}})
scopes, claims = collect(
handlers,
access_token,
scope_request=scope_request,
claims_request=claims_request_section,
)
return IDToken(access_token, scopes, claims)
|
def id_token(access_token, nonce=None, claims_request=None)
|
Returns data required for an OpenID Connect ID Token according to:
- http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
access_token (:class:`AccessToken`): Associated OAuth2 access token.
nonce (str): Optional nonce to protect against replay attacks.
claims_request (dict): Optional dictionary with the claims request parameters.
Information on the `claims_request` parameter specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns an :class:`IDToken` instance with the scopes from the
access_token and the corresponding claims. Claims in the
`claims_request` parameter id_token section will be included *in
addition* to the ones corresponding to the scopes specified in the
`access_token`.
| 4.857718
| 4.521341
| 1.074398
|
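A minimal sketch of the `claims` request consumed by `id_token` above; the structure follows the OIDC claims parameter spec, and the call at the end is hypothetical (it assumes an `access_token` object from the provider):
# Only the 'id_token' section is read by id_token(); keys map claim names
# to None (default behavior) or to a directive dict.
claims_request = {
    'id_token': {
        'email': None,
        'name': {'essential': True},
    }
}
# token = id_token(access_token, nonce='n-0S6_WzA2Mj', claims_request=claims_request)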
handlers = HANDLERS['userinfo']
# Select only the relevant section of the claims request.
claims_request_section = claims_request.get('userinfo', {}) if claims_request else {}
# If nothing is requested, return the claims for the scopes in the access token.
if not scope_request and not claims_request_section:
scope_request = provider.scope.to_names(access_token.scope)
scopes, claims = collect(
handlers,
access_token,
scope_request=scope_request,
claims_request=claims_request_section,
)
return IDToken(access_token, scopes, claims)
|
def userinfo(access_token, scope_request=None, claims_request=None)
|
Returns data required for an OpenID Connect UserInfo response, according to:
http://openid.net/specs/openid-connect-basic-1_0.html#UserInfoResponse
Supports scope and claims request parameter as described in:
- http://openid.net/specs/openid-connect-core-1_0.html#ScopeClaims
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Arguments:
access_token (:class:`AccessToken`): Associated access token.
scope_request (list): Optional list of requested scopes. Only scopes authorized in the `access_token` will be considered.
claims_request (dict): Optional dictionary with a claims request parameter.
Information on the claims request parameter specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
As a convenience, if neither `scope_request` nor a userinfo claim in the
`claims_request` is specified, it will return the claims for
all the scopes in the `access_token`.
Returns an :class:`IDToken` instance with the scopes from the
`scope_request` and the corresponding claims. Claims in the
`claims_request` parameter userinfo section will be included *in
addition* to the ones corresponding to `scope_request`.
| 4.290046
| 3.841871
| 1.116655
|
return jwt.encode(self.claims, secret, algorithm)
|
def encode(self, secret, algorithm='HS256')
|
Encode the set of claims to the JWT (JSON Web Token) format
according to the OpenID Connect specification:
http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
claims (dict): A dictionary with the OpenID Connect claims.
secret (str): Secret used to encode the id_token.
algorithm (str): Algorithm used for encoding.
Defaults to HS256.
Returns encoded JWT token string.
| 6.841002
| 9.451788
| 0.723779
|
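A standalone round-trip sketch with PyJWT (which provides the `jwt.encode` used above); the claim set is illustrative, while a real IDToken carries iss/sub/aud/exp per the OIDC spec:
import jwt

claims = {'iss': 'https://example.com', 'sub': 'user-1', 'nonce': 'abc'}
secret = 'client-secret'
token = jwt.encode(claims, secret, algorithm='HS256')
assert jwt.decode(token, secret, algorithms=['HS256']) == claims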
# Clear the scope for requests that do not use OpenID Connect.
# Scopes for pure OAuth2 requests are currently not supported.
scope = constants.DEFAULT_SCOPE
extra_data = {}
# Add OpenID Connect `id_token` if requested.
#
# TODO: Unfortunately, because of how django-oauth2-provider implements
# scopes, we cannot check if `openid` is the first scope to be
# requested, as required by the OpenID Connect specification.
if provider.scope.check(constants.OPEN_ID_SCOPE, access_token.scope):
id_token = self.get_id_token(access_token, nonce)
extra_data['id_token'] = self.encode_id_token(id_token)
scope = provider.scope.to_int(*id_token.scopes)
# Update the token scope, so it includes only authorized values.
access_token.scope = scope
access_token.save()
# Get the main fields for OAuth2 response.
response_data = super(AccessTokenView, self).access_token_response_data(access_token)
# Add any additional fields if OpenID Connect is requested. The order of
# the addition makes sure the OAuth2 values are not overridden.
response_data = dict(list(extra_data.items()) + list(response_data.items()))
return response_data
|
def access_token_response_data(self, access_token, response_type=None, nonce='')
|
Return `access_token` fields for OAuth2, and add `id_token` fields for
OpenID Connect according to the `access_token` scope.
| 6.265841
| 5.703971
| 1.098505
|
claims_string = self.request.POST.get('claims')
claims_request = json.loads(claims_string) if claims_string else {}
return oidc.id_token(access_token, nonce, claims_request)
|
def get_id_token(self, access_token, nonce)
|
Return an ID token for the given Access Token.
| 4.121981
| 4.05607
| 1.01625
|
# Encode the ID token using the `client_secret`.
#
# TODO: Using the `client_secret` is not ideal, since it is transmitted
# over the wire in some authentication flows. A better alternative is
# to use the public key of the issuer, which also allows the ID token to
# be shared among clients. Doing so however adds some operational
# costs. We should consider this for the future.
secret = id_token.access_token.client.client_secret
return id_token.encode(secret)
|
def encode_id_token(self, id_token)
|
Return encoded ID token.
| 8.415169
| 7.877363
| 1.068272
|
access_token = self.access_token
scope_string = request.GET.get('scope')
scope_request = scope_string.split() if scope_string else None
claims_string = request.GET.get('claims')
claims_request = json.loads(claims_string) if claims_string else None
if not provider.scope.check(constants.OPEN_ID_SCOPE, access_token.scope):
return self._bad_request('Missing openid scope.')
try:
claims = self.userinfo_claims(access_token, scope_request, claims_request)
except ValueError as exception:
return self._bad_request(str(exception))
# TODO: Encode and sign responses if requested.
response = JsonResponse(claims)
return response
|
def get(self, request, *_args, **_kwargs)
|
Respond to a UserInfo request.
Two optional query parameters are accepted, scope and claims.
See the references above for more details.
| 4.02377
| 3.534319
| 1.138485
|
id_token = oidc.userinfo(access_token, scope_request, claims_request)
return id_token.claims
|
def userinfo_claims(self, access_token, scope_request, claims_request)
|
Return the claims for the requested parameters.
| 4.055961
| 3.386315
| 1.197751
|
user = access_token.user
client = access_token.client
# Instantiate handlers. Each handler is instantiated only once, allowing the
# handler to keep state between calls to its scope and claim methods.
handlers = [cls() for cls in handlers]
# Find all authorized scopes by including the access_token scopes. Note
that the handlers determine if a scope is authorized, not its presence in
# the access_token.
required_scopes = set(REQUIRED_SCOPES)
token_scopes = set(provider.scope.to_names(access_token.scope))
authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)
# Select only the authorized scopes from the requested scopes.
scope_request = set(scope_request) if scope_request else set()
scopes = required_scopes | (authorized_scopes & scope_request)
# Find all authorized claims names for the authorized_scopes.
authorized_names = _collect_names(handlers, authorized_scopes, user, client)
# Select only the requested claims if no scope has been requested. Selecting
scopes takes precedence over selecting claims.
claims_request = _validate_claim_request(claims_request)
# Add the requested claims that are authorized to the response.
requested_names = set(claims_request.keys()) & authorized_names
names = _collect_names(handlers, scopes, user, client) | requested_names
# Get the values for the claims.
claims = _collect_values(
handlers,
names=names,
user=user,
client=client,
values=claims_request or {}
)
return authorized_scopes, claims
|
def collect(handlers, access_token, scope_request=None, claims_request=None)
|
Collect all the claims values from the `handlers`.
Arguments:
handlers (list): List of claim :class:`Handler` classes.
access_token (:class:`AccessToken`): Associated access token.
scope_request (list): List of requested scopes.
claims_request (dict): Dictionary with only the relevant section of an
OpenID Connect claims request.
Returns a list of the scopes from `scope_request` that are authorized, and a
dictionary of the claims associated with the authorized scopes in
`scope_request`, and additionally, the authorized claims listed in
`claims_request`.
| 4.696195
| 4.705173
| 0.998092
|
results = set()
data = {'user': user, 'client': client}
def visitor(scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.add(scope_name)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
|
def _collect_scopes(handlers, scopes, user, client)
|
Get a set of all the authorized scopes according to the handlers.
| 4.862234
| 4.255696
| 1.142524
|
results = set()
data = {'user': user, 'client': client}
def visitor(_scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.update(claim_names)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
|
def _collect_names(handlers, scopes, user, client)
|
Get the names of the claims supported by the handlers for the requested scope.
| 5.295607
| 4.397281
| 1.204291
|
results = {}
def visitor(claim_name, func):
data = {'user': user, 'client': client}
data.update(values.get(claim_name) or {})
claim_value = func(data)
# If the claim_value is None, it means that the claim is not authorized.
if claim_value is not None:
# New values overwrite previous results
results[claim_name] = claim_value
_visit_handlers(handlers, visitor, 'claim', names)
return results
|
def _collect_values(handlers, names, user, client, values)
|
Get the values from the handlers of the requested claims.
| 4.650513
| 4.084792
| 1.138495
|
results = {}
claims = claims if claims else {}
for name, value in claims.items():
if value is None:
results[name] = None
elif isinstance(value, dict):
results[name] = _validate_claim_values(name, value, ignore_errors)
else:
if not ignore_errors:
msg = 'Invalid claim {}.'.format(name)
raise ValueError(msg)
return results
|
def _validate_claim_request(claims, ignore_errors=False)
|
Validates a claim request section (`userinfo` or `id_token`) according
to section 5.5 of the OpenID Connect specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns a copy of the claim request with only the valid fields and values.
Raises ValueError if the claim request is invalid and `ignore_errors` is False.
| 2.907488
| 3.078388
| 0.944484
|
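Illustrative input and output for the validator above (a sketch; the values follow section 5.5 of the spec):
request_section = {
    'email': None,                # valid: claim requested with defaults
    'name': {'essential': True},  # valid: directive dict
}
# _validate_claim_request(request_section)
# -> {'email': None, 'name': {'essential': True}}
# A value that is neither None nor a dict (e.g. {'email': 42}) raises
# ValueError unless ignore_errors=True.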
results = {'essential': False}
for key, value in value.items():
if key in CLAIM_REQUEST_FIELDS:
results[key] = value
else:
if not ignore_errors:
msg = 'Unknown attribute {} in claim value {}.'.format(key, name)
raise ValueError(msg)
return results
|
def _validate_claim_values(name, value, ignore_errors)
|
Helper for `_validate_claim_request`
| 4.538367
| 4.102986
| 1.106113
|
results = []
for handler in handlers:
for suffix in suffixes:
func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
if func:
results.append(visitor(suffix, func))
return results
|
def _visit_handlers(handlers, visitor, prefix, suffixes)
|
Use the visitor pattern to collect information from handlers
| 3.246549
| 2.952921
| 1.099436
|
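A self-contained illustration of the dispatch convention assumed by `_visit_handlers`: a handler exposes methods named `<prefix>_<suffix>`, and the visitor receives each suffix together with its bound method (the handler class and data are invented for this sketch):
class ProfileHandler(object):
    def claim_name(self, data):
        return data['user']['name']

def visit(handlers, visitor, prefix, suffixes):
    results = []
    for handler in handlers:
        for suffix in suffixes:
            func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
            if func:
                results.append(visitor(suffix, func))
    return results

# 'claim_email' does not exist on the handler, so only 'name' is visited:
print(visit([ProfileHandler()],
            lambda s, f: (s, f({'user': {'name': 'Ada'}})),
            'claim', ['name', 'email']))   # -> [('name', 'Ada')]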
for k, v in self.parse_form().items():
setattr(model, k, v)
|
def update_model(self, model)
|
Trivial implementation for simple data in the form,
using the model prefix.
| 6.335686
| 5.396487
| 1.174039
|
locale = 'en'
if request.accept_language:
locale = request.accept_language.best_match(LANGUAGES)
locale = LANGUAGES.get(locale, 'en')
return locale
|
def locale_negotiator(request)
|
Locale negotiator based on the `Accept-Language` header
| 2.489101
| 2.451119
| 1.015496
|
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding('utf-8')
settings = dict(settings)
# Scoping sessions for Pyramid ensures sessions are committed/rolled back
# after the template has been rendered.
create_engine(settings, scoped=True)
authn_policy = RouteSwitchAuthPolicy(secret=settings['pyshop.cookie_key'],
callback=groupfinder)
authz_policy = ACLPolicy()
route_prefix = settings.get('pyshop.route_prefix')
config = Configurator(settings=settings,
root_factory=RootFactory,
route_prefix=route_prefix,
locale_negotiator=locale_negotiator,
authentication_policy=authn_policy,
authorization_policy=authz_policy)
config.end()
return config.make_wsgi_app()
|
def main(global_config, **settings)
|
Get a PyShop WSGI application configured with settings.
| 4.490096
| 4.166723
| 1.077609
|
return cls.first(session, where=(cls.name == name,))
|
def by_name(cls, session, name)
|
Get a group from a given name.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param name: name of the group
:type name: unicode
:return: group instance
:rtype: :class:`pyshop.models.Group`
| 8.491291
| 17.973637
| 0.47243
|
user = cls.first(session,
where=((cls.login == login),
(cls.local == local),)
)
# XXX it appears that this is not case-sensitive!
return user if user and user.login == login else None
|
def by_login(cls, session, login, local=True)
|
Get a user from a given login.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param login: the user login
:type login: unicode
:return: the associated user
:rtype: :class:`pyshop.models.User`
| 7.480319
| 9.251044
| 0.808592
|
user = cls.by_login(session, login, local=True)
if not user:
return None
if crypt.check(user.password, password):
return user
|
def by_credentials(cls, session, login, password)
|
Get a user from given credentials
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param login: username
:type login: unicode
:param password: user password
:type password: unicode
:return: associated user
:rtype: :class:`pyshop.models.User`
| 4.471068
| 6.097105
| 0.73331
|
return cls.find(session,
where=(cls.local == True,),
order_by=cls.login,
**kwargs)
|
def get_locals(cls, session, **kwargs)
|
Get all local users.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:return: local users
:rtype: generator of :class:`pyshop.models.User`
| 8.953303
| 8.500695
| 1.053244
|
errors = []
if not self.login:
errors.append(u'login is required')
else:
other = User.by_login(session, self.login)
if other and other.id != self.id:
errors.append(u'duplicate login %s' % self.login)
if not self.password:
errors.append(u'password is required')
if not self.email:
errors.append(u'email is required')
elif not re_email.match(self.email):
errors.append(u'%s is not a valid email' % self.email)
if len(errors):
raise ModelError(errors)
return True
|
def validate(self, session)
|
Validate that the current user can be saved.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:return: ``True``
:rtype: bool
:raise: :class:`pyshop.helpers.sqla.ModelError` if user is not valid
| 2.270962
| 1.996405
| 1.137526
|
classifier = cls.first(session, where=(cls.name == name,))
if not kwargs.get('create_if_not_exists', False):
return classifier
if not classifier:
splitted_names = [n.strip() for n in name.split(u'::')]
classifiers = [u' :: '.join(splitted_names[:i + 1])
for i in range(len(splitted_names))]
parent_id = None
category = splitted_names[0]
for c in classifiers:
classifier = cls.first(session, where=(cls.name == c,))
if not classifier:
classifier = Classifier(name=c, parent_id=parent_id,
category=category)
session.add(classifier)
session.flush()
parent_id = classifier.id
return classifier
|
def by_name(cls, session, name, **kwargs)
|
Get a classifier from a given name.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param name: name of the classifier
:type name: unicode
:return: classifier instance
:rtype: :class:`pyshop.models.Classifier`
| 2.863173
| 2.959933
| 0.96731
|
releases = [(parse_version(release.version), release)
for release in self.releases]
releases.sort(reverse=True)
return [release[1] for release in releases]
|
def sorted_releases(self)
|
Releases sorted by version.
| 3.248098
| 2.739421
| 1.185688
|
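The sort above relies on version-aware ordering rather than plain string ordering; a quick standalone check (assuming `parse_version` is the pkg_resources one):
from pkg_resources import parse_version

versions = ['1.0.10', '1.0.2', '1.0.2rc1', '0.9']
print(sorted(versions, key=parse_version, reverse=True))
# -> ['1.0.10', '1.0.2', '1.0.2rc1', '0.9']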
# XXX the field "name" should be created with a
# case insensitive collation.
pkg = cls.first(session, where=(cls.name.like(name),))
if not pkg:
name = name.replace(u'-', u'_').upper()
pkg = cls.first(session,
where=(cls.name.like(name),))
# XXX _ is a like operator
if pkg and pkg.name.upper().replace(u'-', u'_') != name:
pkg = None
return pkg
|
def by_name(cls, session, name)
|
Get a package from a given name.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param name: name of the package
:type name: unicode
:return: package instance
:rtype: :class:`pyshop.models.Package`
| 5.24603
| 5.482664
| 0.95684
|
where = []
if opts.get('local_only'):
where.append(cls.local == True)
if opts.get('names'):
where.append(cls.name.in_(opts['names']))
if opts.get('classifiers'):
ids = [c.id for c in opts.get('classifiers')]
cls_pkg = classifier__package
qry = session.query(cls_pkg.c.package_id,
func.count('*'))
qry = qry.filter(cls_pkg.c.classifier_id.in_(ids))
qry = qry.group_by(cls_pkg.c.package_id)
qry = qry.having(func.count('*') >= len(ids))
where.append(cls.id.in_([r[0] for r in qry.all()]))
return cls.find(session, where=where, **kwargs)
|
def by_filter(cls, session, opts, **kwargs)
|
Get packages from given filters.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param opts: filtering options
:type opts: `dict
:return: package instances
:rtype: generator of :class:`pyshop.models.Package`
| 2.372463
| 2.402388
| 0.987544
|
return cls.find(session,
join=(cls.owners),
where=(User.login == owner_name,),
order_by=cls.name)
|
def by_owner(cls, session, owner_name)
|
Get packages from a given owner username.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param owner_name: owner username
:type owner_name: unicode
:return: package instances
:rtype: generator of :class:`pyshop.models.Package`
| 6.980105
| 10.210633
| 0.683611
|
return cls.find(session,
join=(cls.maintainers),
where=(User.login == maintainer_name,),
order_by=cls.name)
|
def by_maintainer(cls, session, maintainer_name)
|
Get package from a given maintainer name.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param maintainer_name: maintainer username
:type maintainer_name: unicode
:return: package instances
:rtype: generator of :class:`pyshop.models.Package`
| 6.653712
| 9.390029
| 0.708593
|
return cls.find(session,
where=(cls.local == True,))
|
def get_locals(cls, session)
|
Get all local packages.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:return: package instances
:rtype: generator of :class:`pyshop.models.Package`
| 25.983286
| 30.161972
| 0.861458
|
return cls.find(session,
where=(cls.local == False,))
|
def get_mirrored(cls, session)
|
Get all mirrored packages.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:return: package instances
:rtype: generator of :class:`pyshop.models.Package`
| 27.492085
| 43.700478
| 0.629103
|
return cls.first(session,
join=(Package,),
where=((Package.name == package_name),
(cls.version == version)))
|
def by_version(cls, session, package_name, version)
|
Get release for a given version.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param package_name: package name
:type package_name: unicode
:param version: version
:type version: unicode
:return: release instance
:rtype: :class:`pyshop.models.Release`
| 8.050824
| 11.38379
| 0.707218
|
return cls.find(session,
join=(cls.classifiers,),
where=(Classifier.name.in_(classifiers),),
)
|
def by_classifiers(cls, session, classifiers)
|
Get releases for given classifiers.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param classifiers: classifiers
:type classifiers: unicode
:return: release instances
:rtype: generator of :class:`pyshop.models.Release`
| 7.07685
| 10.71823
| 0.660263
|
available = {'name': Package.name,
'version': cls.version,
'author': User.login,
'author_email': User.email,
'maintainer': User.login,
'maintainer_email': User.email,
'home_page': cls.home_page,
'license': cls.license,
'summary': cls.summary,
'description': cls.description,
'keywords': cls.keywords,
'platform': cls.platform,
'download_url': cls.download_url
}
oper = {'or': or_, 'and': and_}
join_map = {'name': Package,
'author': cls.author,
'author_email': cls.author,
'maintainer': cls.maintainer,
'maintainer_email': cls.maintainer,
}
where = []
join = []
for opt, val in opts.items():
field = available[opt]
if hasattr(val, '__iter__') and len(val) > 1:
stmt = or_(*[field.like(u'%%%s%%' % v) for v in val])
else:
stmt = field.like(u'%%%s%%' % val)
where.append(stmt)
if opt in join_map:
join.append(join_map[opt])
return cls.find(session, join=join,
where=(oper[operator](*where),))
|
def search(cls, session, opts, operator)
|
Get releases for given filters.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param opts: filtering options
:type opts: dict
:param operator: filtering options joining operator (`and` or `or`)
:type operator: basestring
:return: release instances
:rtype: generator of :class:`pyshop.models.Release`
| 2.42015
| 2.405559
| 1.006066
|
return cls.find(session,
join=(Release, Package),
where=(Package.name == package_name,
Release.version == version,
))
|
def by_release(cls, session, package_name, version)
|
Get release files for a given package
name and for a given version.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param package_name: package name
:type package_name: unicode
:param version: version
:type version: unicode
:return: release files
:rtype: generator of :class:`pyshop.models.ReleaseFile`
| 5.041506
| 9.752338
| 0.516954
|
return cls.first(session,
where=(ReleaseFile.release_id == release.id,
ReleaseFile.filename == filename,
))
|
def by_filename(cls, session, release, filename)
|
Get a release file for a given release and a given filename.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param release: release
:type release: :class:`pyshop.models.Release`
:param filename: filename of the release file
:type filename: unicode
:return: release file
:rtype: :class:`pyshop.models.ReleaseFile`
| 4.263135
| 5.78436
| 0.737011
|
event['static_url'] = lambda x: static_path(x, event['request'])
event['route_url'] = lambda name, *args, **kwargs: \
route_path(name, event['request'], *args, **kwargs)
event['parse_rest'] = parse_rest
event['has_permission'] = event['request'].has_permission
|
def add_urlhelpers(event)
|
Add helpers to the template engine.
| 3.560537
| 3.483794
| 1.022029
|
session = DBSession()
names = [p.name for p in Package.all(session, order_by=Package.name)]
return names
|
def list_packages(request)
|
Retrieve a list of the package names registered with the package index.
Returns a list of name strings.
| 5.690326
| 5.376923
| 1.058287
|
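A hypothetical client-side call against such an index over XML-RPC (the endpoint URL is an assumption, not part of the row):
import xmlrpc.client

pypi = xmlrpc.client.ServerProxy('http://localhost:6543/pypi')
for name in pypi.list_packages():
    print(name)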
session = DBSession()
package = Package.by_name(session, package_name)
return [rel.version for rel in package.sorted_releases]
|
def package_releases(request, package_name, show_hidden=False)
|
Retrieve a list of the releases registered for the given package_name.
Returns a list with all version strings if show_hidden is True or
only the non-hidden ones otherwise.
| 6.106568
| 4.904615
| 1.245066
|
session = DBSession()
package = Package.by_name(session, package_name)
owners = [('Owner', o.name) for o in package.owners]
maintainers = [('Maintainer', o.name) for o in package.maintainers]
return owners + maintainers
|
def package_roles(request, package_name)
|
Retrieve a list of users and their roles for a given
package_name. Role is either 'Maintainer' or 'Owner'.
| 3.337315
| 2.812113
| 1.186764
|
session = DBSession()
owned = Package.by_owner(session, user)
maintained = Package.by_maintainer(session, user)
owned = [('Owner', p.name) for p in owned]
maintained = [('Maintainer', p.name) for p in maintained]
return owned + maintained
|
def user_packages(request, user)
|
Retrieve a list of [role_name, package_name] for a given username.
Role is either 'Maintainer' or 'Owner'.
| 3.588411
| 2.881859
| 1.245172
|
session = DBSession()
release_files = ReleaseFile.by_release(session, package_name, version)
if release_files:
release_files = [(f.release.package.name,
f.filename) for f in release_files]
return release_files
|
def release_downloads(request, package_name, version)
|
Retrieve a list of files and download count for a given package and
release version.
| 4.065566
| 4.320292
| 0.941039
|
session = DBSession()
release_files = ReleaseFile.by_release(session, package_name, version)
return [{'url': f.url,
'packagetype': f.package_type,
'filename': f.filename,
'size': f.size,
'md5_digest': f.md5_digest,
'downloads': f.downloads,
'has_sig': f.has_sig,
'comment_text': f.comment_text,
'python_version': f.python_version
}
for f in release_files]
|
def release_urls(request, package_name, version)
|
Retrieve a list of download URLs for the given package release.
Returns a list of dicts with the following keys:
url
packagetype ('sdist', 'bdist', etc)
filename
size
md5_digest
downloads
has_sig
python_version (required version, or 'source', or 'any')
comment_text
| 2.677954
| 1.895023
| 1.413151
|
session = DBSession()
release = Release.by_version(session, package_name, version)
if release:
result = {'name': release.package.name,
'version': release.version,
'stable_version': '',
'author': release.author.name,
'author_email': release.author.email,
'home_page': release.home_page,
'license': release.license,
'summary': release.summary,
'description': release.description,
'keywords': release.keywords,
'platform': release.platform,
'download_url': release.download_url,
'classifiers': [c.name for c in release.classifiers],
#'requires': '',
#'requires_dist': '',
#'provides': '',
#'provides_dist': '',
#'requires_external': '',
#'requires_python': '',
#'obsoletes': '',
#'obsoletes_dist': '',
'bugtrack_url': release.bugtrack_url,
'docs_url': release.docs_url,
}
if release.maintainer:
result.update({'maintainer': release.maintainer.name,
'maintainer_email': release.maintainer.email,
})
return dict([(key, val or '') for key, val in result.items()])
|
def release_data(request, package_name, version)
|
Retrieve metadata describing a specific package release.
Returns a dict with keys for:
name
version
stable_version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
classifiers (list of classifier strings)
requires
requires_dist
provides
provides_dist
requires_external
requires_python
obsoletes
obsoletes_dist
project_url
docs_url (URL of the packages.python.org docs
if they've been supplied)
If the release does not exist, an empty dictionary is returned.
| 2.169616
| 1.872412
| 1.158728
|
api = pypi.proxy
rv = []
# search in proxy
for k, v in spec.items():
rv += api.search({k: v}, True)
# search in local
session = DBSession()
release = Release.search(session, spec, operator)
rv += [{'name': r.package.name,
'version': r.version,
'summary': r.summary,
# hack https://mail.python.org/pipermail/catalog-sig/2012-October/004633.html
'_pypi_ordering':'',
} for r in release]
return rv
|
def search(request, spec, operator='and')
|
Search the package database using the indicated search spec.
The spec may include any of the keywords described in the above list
(except 'stable_version' and 'classifiers'),
for example: {'description': 'spam'} will search description fields.
Within the spec, a field's value can be a string or a list of strings
(the values within the list are combined with an OR),
for example: {'name': ['foo', 'bar']}.
Valid keys for the spec dict are listed here. Invalid keys are ignored:
name
version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
Arguments for different fields are combined using either "and"
(the default) or "or".
Example: search({'name': 'foo', 'description': 'bar'}, 'or').
The results are returned as a list of dicts
{'name': package name,
'version': package release version,
'summary': package release summary}
| 6.139811
| 5.992166
| 1.02464
|
session = DBSession()
release = Release.by_classifiers(session, classifiers)
rv = [(r.package.name, r.version) for r in release]
return rv
|
def browse(request, classifiers)
|
Retrieve a list of (name, version) pairs of all releases classified
with all of the given classifiers. 'classifiers' must be a list of
Trove classifier strings.
changelog(since)
Retrieve a list of four-tuples (name, version, timestamp, action)
since the given timestamp. All timestamps are UTC values.
The argument is a UTC integer seconds since the epoch.
| 6.72138
| 5.879684
| 1.143153
|
global proxy, PYPI_URL
PYPI_URL = proxy_url
proxy = xmlrpc.ServerProxy(
proxy_url,
transport=RequestsTransport(proxy_url.startswith('https://')),
allow_none=True)
|
def set_proxy(proxy_url, transport_proxy=None)
|
Create the proxy to PyPI XML-RPC Server
| 5.936368
| 4.377838
| 1.356005
|
headers = {'User-Agent': self.user_agent,
#Proxy-Connection': 'Keep-Alive',
#'Content-Range': 'bytes oxy1.0/-1',
'Accept': 'text/xml',
'Content-Type': 'text/xml' }
url = self._build_url(host, handler)
try:
resp = requests.post(url, data=request_body, headers=headers)
except ValueError:
raise
except Exception:
raise # something went wrong
else:
try:
resp.raise_for_status()
except requests.RequestException as e:
raise xmlrpc.ProtocolError(url, resp.status_code,
str(e), resp.headers)
else:
return self.parse_response(resp)
|
def request(self, host, handler, request_body, verbose)
|
Make an xmlrpc request.
| 3.666238
| 3.548704
| 1.03312
|
if len(request.environ.get('HTTP_AUTHORIZATION', '')) > 0:
auth = request.environ.get('HTTP_AUTHORIZATION')
scheme, data = auth.split(None, 1)
assert scheme.lower() == 'basic'
data = base64.b64decode(data)
if not isinstance(data, unicode):
data = data.decode('utf-8')
username, password = data.split(':', 1)
if User.by_ldap_credentials(
DBSession(), username, password, request.registry.settings):
return HTTPFound(location=request.url)
if User.by_credentials(DBSession(), username, password):
return HTTPFound(location=request.url)
return Response(status=401,
headerlist=[(str('WWW-Authenticate'),
str('Basic realm="pyshop repository access"'),
)],
)
|
def authbasic(request)
|
HTTP Basic authentication for pyshop repository upload access
| 2.838121
| 2.565087
| 1.106442
|
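What a client sends for this view to parse is the standard HTTP Basic scheme, base64 over `login:password` (a sketch of the client side):
import base64

credentials = base64.b64encode(b'alice:s3cret').decode('ascii')
headers = {'Authorization': 'Basic ' + credentials}
# the view splits off the scheme, b64-decodes, then splits on the first ':'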
if RootFactory._acl is None:
acl = []
session = DBSession()
groups = Group.all(session)
for g in groups:
acl.extend([(Allow, g.name, p.name) for p in g.permissions])
RootFactory._acl = acl
return RootFactory._acl
|
def get_acl(self, request)
|
Get ACL.
Initialize the __acl__ from the sql database once,
then use the cached version.
:param request: pyramid request
:type request: :class:`pyramid.request.Request`
:return: ACLs in pyramid format (Allow, group name, permission name)
:rtype: list of tuples
| 4.62087
| 3.985437
| 1.159439
|
settings = request.registry.settings
whlify = asbool(settings.get('pyshop.mirror.wheelify', '0'))
session = DBSession()
f = ReleaseFile.by_id(session, int(request.matchdict['file_id']))
whlify = whlify and f.package_type == 'sdist'
filename = f.filename_whlified if whlify else f.filename
url = f.url
if url and url.startswith('http://pypi.python.org'):
url = 'https' + url[4:]
rv = {'url': url,
'filename': filename,
'original': f.filename,
'whlify': whlify
}
f.downloads += 1
f.release.downloads += 1
f.release.package.downloads += 1
session.add(f.release.package)
session.add(f.release)
session.add(f)
request.response.etag = f.md5_digest
request.response.cache_control = 'max-age=31557600, public'
request.response.date = datetime.datetime.utcnow()
return rv
|
def show_release_file(root, request)
|
Download a release file.
Must be used with :func:`pyshop.helpers.download.renderer_factory`
to download the release file.
:return: download information
:rtype: dict
| 3.956451
| 3.941814
| 1.003713
|
session = DBSession()
settings = request.registry.settings
whlify = asbool(settings.get('pyshop.mirror.wheelify', '0'))
release = Release.by_id(session, int(request.matchdict['release_id']))
filename = (release.whlify_download_url_file if whlify else
release.download_url_file)
rv = {'url': release.download_url,
'filename': filename,
'original': release.download_url_file,
'whlify': whlify
}
release.downloads += 1
release.package.downloads += 1
session.add(release.package)
session.add(release)
request.response.date = datetime.datetime.utcnow()
return rv
|
def show_external_release_file(root, request)
|
Download a release from a download url from its package information.
Must be used with :func:`pyshop.helpers.download.renderer_factory`
to download the release file.
:return: download information
:rtype: dict
| 4.982837
| 4.812938
| 1.035301
|
'''Wrapper for b64decode, without having to struggle with bytestrings.'''
byte_string = data.encode('utf-8')
decoded = base64.b64decode(byte_string)
return decoded.decode('utf-8')
|
def decode_b64(data)
|
Wrapper for b64decode, without having to struggle with bytestrings.
| 4.626493
| 2.219589
| 2.084391
|
'''Wrapper for b64encode, without having to struggle with bytestrings.'''
byte_string = data.encode('utf-8')
encoded = base64.b64encode(byte_string)
return encoded.decode('utf-8')
|
def encode_b64(data)
|
Wrapper for b64encode, without having to struggle with bytestrings.
| 4.560985
| 2.195289
| 2.077624
|
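The two helpers above are inverses; a round-trip usage, assuming both are in scope:
assert decode_b64(encode_b64(u'héllo')) == u'héllo'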
nx, ny = self._nx * subgrid_res, self._ny * subgrid_res
if self._idex_mask_bool is True:
idex_mask = self._idex_mask
grid1d = np.zeros((nx * ny))
if subgrid_res > 1:
idex_mask_subgrid = self._idex_mask_sub
else:
idex_mask_subgrid = idex_mask
grid1d[idex_mask_subgrid == 1] = array
else:
grid1d = array
grid2d = util.array2image(grid1d, nx, ny)
return grid2d
|
def array2image(self, array, subgrid_res=1)
|
maps a 1d array into a (nx, ny) 2d grid with array populating the idex_mask indices
:param array: 1d array
:param idex_mask: 1d array of length nx*ny
:param nx: x-axis of 2d grid
:param ny: y-axis of 2d grid
:return:
| 2.698714
| 2.441085
| 1.105539
|
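A self-contained numpy sketch of the mask-to-grid mapping performed above (the mask and values are invented; `reshape` stands in for `util.array2image`):
import numpy as np

nx = ny = 3
idex_mask = np.array([1, 0, 1, 0, 1, 0, 1, 0, 1])
values = np.array([10., 20., 30., 40., 50.])  # one value per masked pixel
grid1d = np.zeros(nx * ny)
grid1d[idex_mask == 1] = values
print(grid1d.reshape(nx, ny))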
idex_mask = self._idex_mask
array = util.image2array(image)
if self._idex_mask_bool is True:
return array[idex_mask == 1]
else:
return array
|
def image2array(self, image)
|
returns 1d array of values in image in idex_mask
:param image:
:param idex_mask:
:return:
| 5.765616
| 4.067679
| 1.417422
|
if not hasattr(self, '_x_min_psf'):
idex_2d = self._idex_mask_2d
self._x_min_psf = np.min(np.where(idex_2d == 1)[0])
self._x_max_psf = np.max(np.where(idex_2d == 1)[0])
self._y_min_psf = np.min(np.where(idex_2d == 1)[1])
self._y_max_psf = np.max(np.where(idex_2d == 1)[1])
|
def _init_mask_psf(self)
|
smaller frame that encloses all the idex_mask
:param idex_mask:
:param nx:
:param ny:
:return:
| 1.861565
| 1.749244
| 1.064212
|
self._init_mask_psf()
return image[self._x_min_psf*subgrid_res:(self._x_max_psf+1)*subgrid_res, self._y_min_psf*subgrid_res:(self._y_max_psf+1)*subgrid_res]
|
def _cutout_psf(self, image, subgrid_res)
|
cutout the part of the image relevant for the psf convolution
:param image:
:return:
| 2.409315
| 2.363256
| 1.01949
|
n = self._index2n(index)
num_prev = n * (n + 1) / 2
num = index + 1
delta = int(num - num_prev - 1)
if n % 2 == 0:
if delta == 0:
m = delta
complex_bool = False
elif delta % 2 == 0:
complex_bool = True
m = delta
else:
complex_bool = False
m = delta + 1
else:
if delta % 2 == 0:
complex_bool = False
m = delta + 1
else:
complex_bool = True
m = delta
return n, m, complex_bool
|
def index2poly(self, index)
|
manages the convention mapping an iteration index to the specific polynomial n, m and its real/imaginary part
:param index: int, index of list
:return: n, m bool
| 2.923363
| 2.762464
| 1.058245
|
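A standalone sketch of the convention, assuming the `_index2n` helper (not shown in the row) inverts the triangular layout n(n+1)/2 <= index < (n+1)(n+2)/2:
import math

def index2n(index):
    return int((math.sqrt(8 * index + 1) - 1) // 2)

def index2poly(index):
    n = index2n(index)
    delta = index - n * (n + 1) // 2
    if n % 2 == 0:
        if delta == 0:
            return n, 0, False
        return (n, delta, True) if delta % 2 == 0 else (n, delta + 1, False)
    return (n, delta + 1, False) if delta % 2 == 0 else (n, delta, True)

print([index2poly(i) for i in range(6)])
# -> [(0, 0, False), (1, 1, False), (1, 1, True),
#     (2, 0, False), (2, 2, False), (2, 2, True)]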
num_param = self.shapelets.num_param(n_max)
param_list = np.zeros(num_param)
amp_norm = 1. * deltaPix**2
L_list = self._pre_calc(x, y, beta, n_max, center_x, center_y)
for i in range(num_param):
base = self._pre_calc_function(L_list, i) * amp_norm
param = np.sum(image*base)
n, m, complex_bool = self.shapelets.index2poly(i)
if m != 0:
param *= 2
param_list[i] = param
return param_list
|
def decomposition(self, image, x, y, n_max, beta, deltaPix, center_x=0, center_y=0)
|
decomposes an image into the shapelet coefficients in same order as for the function call
:param image:
:param x:
:param y:
:param n_max:
:param beta:
:param center_x:
:param center_y:
:return:
| 4.6639
| 4.632709
| 1.006733
|
nx, ny = np.shape(kernel)
kernel_new = np.zeros((nx+2, ny+2)) + (kernel[0, 0] + kernel[0, -1] + kernel[-1, 0] + kernel[-1, -1]) / 4.
kernel_new[1:-1, 1:-1] = kernel
int_shift_x = int(round(shift_x))
frac_x_shift = shift_x - int_shift_x
int_shift_y = int(round(shift_y))
frac_y_shift = shift_y - int_shift_y
kernel_init = copy.deepcopy(kernel_new)
kernel_init_shifted = copy.deepcopy(interp.shift(kernel_init, [int_shift_y, int_shift_x], order=1))
kernel_new = interp.shift(kernel_new, [int_shift_y, int_shift_x], order=1)
norm = np.sum(kernel_init_shifted)
for i in range(iterations):
kernel_shifted_inv = interp.shift(kernel_new, [-frac_y_shift, -frac_x_shift], order=1)
delta = kernel_init_shifted - kernel_norm(kernel_shifted_inv) * norm
kernel_new += delta * 1.
kernel_new = kernel_norm(kernel_new) * norm
return kernel_new[1:-1, 1:-1]
|
def de_shift_kernel(kernel, shift_x, shift_y, iterations=20)
|
de-shifts a shifted kernel to the center of a pixel. This is performed iteratively.
The input kernel is the solution of a linear interpolated shift of a sharper kernel centered in the middle of the
pixel. To find the de-shifted kernel, we perform an iterative correction of proposed de-shifted kernels and compare
its shifted version with the input kernel.
:param kernel: (shifted) kernel, e.g. a star in an image that is not centered in the pixel grid
:param shift_x: x-offset relative to the center of the pixel (sub-pixel shift)
:param shift_y: y-offset relative to the center of the pixel (sub-pixel shift)
:return: de-shifted kernel such that the interpolated shift by (shift_x, shift_y) results in the input kernel
| 2.072986
| 2.060698
| 1.005963
|
kernel = kernel_norm(kernel)
nx, ny = np.shape(kernel)
if nx %2 == 0:
raise ValueError("kernel needs odd number of pixels")
# make coordinate grid of kernel
x_grid, y_grid = util.make_grid(nx, deltapix=1, left_lower=False)
# compute 1st moments to get light weighted center
x_w = np.sum(kernel * util.array2image(x_grid))
y_w = np.sum(kernel * util.array2image(y_grid))
# de-shift kernel
kernel_centered = de_shift_kernel(kernel, shift_x=-x_w, shift_y=-y_w, iterations=iterations)
return kernel_norm(kernel_centered)
|
def center_kernel(kernel, iterations=20)
|
given a kernel that might not be perfectly centered, this routine computes its light weighted center and then
moves the center in an iterative process such that it is centered
:param kernel: 2d array (odd numbers)
:param iterations: int, number of iterations
:return: centered kernel
| 4.712097
| 4.467527
| 1.054744
|
subgrid_res = int(subgrid_res)
if subgrid_res == 1:
return kernel
nx, ny = np.shape(kernel)
d_x = 1. / nx
x_in = np.linspace(d_x/2, 1-d_x/2, nx)
d_y = 1. / nx
y_in = np.linspace(d_y/2, 1-d_y/2, ny)
nx_new = nx * subgrid_res
ny_new = ny * subgrid_res
if odd is True:
if nx_new % 2 == 0:
nx_new -= 1
if ny_new % 2 == 0:
ny_new -= 1
d_x_new = 1. / nx_new
d_y_new = 1. / ny_new
x_out = np.linspace(d_x_new/2., 1-d_x_new/2., nx_new)
y_out = np.linspace(d_y_new/2., 1-d_y_new/2., ny_new)
kernel_input = copy.deepcopy(kernel)
kernel_subgrid = image_util.re_size_array(x_in, y_in, kernel_input, x_out, y_out)
kernel_subgrid = kernel_norm(kernel_subgrid)
for i in range(max(num_iter, 1)):
# given a proposition, re-size it to original pixel size
if subgrid_res % 2 == 0:
kernel_pixel = averaging_even_kernel(kernel_subgrid, subgrid_res)
else:
kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
delta = kernel - kernel_pixel
#plt.matshow(delta)
#plt.colorbar()
#plt.show()
temp_kernel = kernel_input + delta
kernel_subgrid = image_util.re_size_array(x_in, y_in, temp_kernel, x_out, y_out)#/norm_subgrid
kernel_subgrid = kernel_norm(kernel_subgrid)
kernel_input = temp_kernel
#from scipy.ndimage import zoom
#ratio = subgrid_res
#kernel_subgrid = zoom(kernel, ratio, order=4) / ratio ** 2
#print(np.shape(kernel_subgrid))
# whatever has not been matched is added to zeroth order (in squares of the undersampled PSF)
if subgrid_res % 2 == 0:
return kernel_subgrid
kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
kernel_pixel = kernel_norm(kernel_pixel)
delta_kernel = kernel_pixel - kernel_norm(kernel)
id = np.ones((subgrid_res, subgrid_res))
delta_kernel_sub = np.kron(delta_kernel, id)/subgrid_res**2
return kernel_norm(kernel_subgrid - delta_kernel_sub)
|
def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100)
|
creates a higher resolution kernel with subgrid resolution as an interpolation of the original kernel in an
iterative approach
:param kernel: initial kernel
:param subgrid_res: subgrid resolution required
:return: kernel with higher resolution (larger)
| 2.683855
| 2.713461
| 0.989089
|
n_high = len(kernel_high_res)
n_low = int((n_high + 1) / subgrid_res)
kernel_low_res = np.zeros((n_low, n_low))
# adding pixels that are fully within a single re-binned pixel
for i in range(subgrid_res-1):
for j in range(subgrid_res-1):
kernel_low_res += kernel_high_res[i::subgrid_res, j::subgrid_res]
# adding half of a pixel that has over-lap with two pixels
i = subgrid_res - 1
for j in range(subgrid_res - 1):
kernel_low_res[1:, :] += kernel_high_res[i::subgrid_res, j::subgrid_res] / 2
kernel_low_res[:-1, :] += kernel_high_res[i::subgrid_res, j::subgrid_res] / 2
j = subgrid_res - 1
for i in range(subgrid_res - 1):
kernel_low_res[:, 1:] += kernel_high_res[i::subgrid_res, j::subgrid_res] / 2
kernel_low_res[:, :-1] += kernel_high_res[i::subgrid_res, j::subgrid_res] / 2
# adding a quarter of a pixel value that is at the border of four pixels
i = subgrid_res - 1
j = subgrid_res - 1
kernel_edge = kernel_high_res[i::subgrid_res, j::subgrid_res]
kernel_low_res[1:, 1:] += kernel_edge / 4
kernel_low_res[:-1, 1:] += kernel_edge / 4
kernel_low_res[1:, :-1] += kernel_edge / 4
kernel_low_res[:-1, :-1] += kernel_edge / 4
return kernel_low_res
|
def averaging_even_kernel(kernel_high_res, subgrid_res)
|
makes a lower resolution kernel based on the kernel_high_res (odd numbers) and the subgrid_res (even number), both
meant to be centered.
:param kernel_high_res: high resolution kernel with even subsampling resolution, centered
:param subgrid_res: subsampling resolution (even number)
:return: averaged undersampling kernel
| 1.838849
| 1.870697
| 0.982976
|
numPix = len(kernel)
numPix_new = int(round(numPix * deltaPix_in/deltaPix_out))
if numPix_new % 2 == 0:
numPix_new -= 1
x_in = np.linspace(-(numPix-1)/2*deltaPix_in, (numPix-1)/2*deltaPix_in, numPix)
x_out = np.linspace(-(numPix_new-1)/2*deltaPix_out, (numPix_new-1)/2*deltaPix_out, numPix_new)
kernel_out = image_util.re_size_array(x_in, x_in, kernel, x_out, x_out)
kernel_out = kernel_norm(kernel_out)
return kernel_out
|
def kernel_pixelsize_change(kernel, deltaPix_in, deltaPix_out)
|
change the pixel size of a given kernel
:param kernel:
:param deltaPix_in:
:param deltaPix_out:
:return:
| 2.088163
| 2.099588
| 0.994558
|
kernel = image_util.cut_edges(psf_data, psf_size)
kernel = kernel_norm(kernel)
return kernel
|
def cut_psf(psf_data, psf_size)
|
cut the PSF to the given size and re-normalize it
:param psf_data: image of PSF
:param psf_size: size of psf
:return: re-sized and re-normalized PSF
| 6.026328
| 7.681044
| 0.784572
|
kernel_subgrid = subgrid_kernel(point_source_kernel, subgrid_res, num_iter=10)
kernel_size = len(point_source_kernel)
kernel_pixel = np.zeros((kernel_size*subgrid_res, kernel_size*subgrid_res))
for i in range(subgrid_res):
k_x = int((kernel_size-1) / 2 * subgrid_res + i)
for j in range(subgrid_res):
k_y = int((kernel_size-1) / 2 * subgrid_res + j)
kernel_pixel = image_util.add_layer2image(kernel_pixel, k_x, k_y, kernel_subgrid)
kernel_pixel = util.averaging(kernel_pixel, numGrid=kernel_size*subgrid_res, numPix=kernel_size)
return kernel_norm(kernel_pixel)
|
def pixel_kernel(point_source_kernel, subgrid_res=7)
|
converts a pixelised kernel of a point source to a kernel representing a uniform extended pixel
:param point_source_kernel:
:param subgrid_res:
:return: convolution kernel for an extended pixel
| 2.71149
| 2.869588
| 0.944906
|
n = len(kernel)
n_sub = len(kernel_subgrid)
if subsampling_size % 2 == 0:
subsampling_size += 1
if subsampling_size > n:
subsampling_size = n
kernel_hole = copy.deepcopy(kernel)
n_min = int((n-1)/2 - (subsampling_size-1)/2)
n_max = int((n-1)/2 + (subsampling_size-1)/2 + 1)
kernel_hole[n_min:n_max, n_min:n_max] = 0
n_min_sub = int((n_sub - 1) / 2 - (subsampling_size*subgrid_res - 1) / 2)
n_max_sub = int((n_sub - 1) / 2 + (subsampling_size * subgrid_res - 1) / 2 + 1)
kernel_subgrid_cut = kernel_subgrid[n_min_sub:n_max_sub, n_min_sub:n_max_sub]
flux_subsampled = np.sum(kernel_subgrid_cut)
flux_hole = np.sum(kernel_hole)
if flux_hole > 0:
kernel_hole *= (1. - flux_subsampled) / np.sum(kernel_hole)
else:
kernel_subgrid_cut /= np.sum(kernel_subgrid_cut)
return kernel_hole, kernel_subgrid_cut
|
def split_kernel(kernel, kernel_subgrid, subsampling_size, subgrid_res)
|
pixel kernel and subsampling kernel such that the convolution of both applied on an image can be
performed, i.e. smaller subsampling PSF and hole in larger PSF
:param kernel: PSF kernel of the size of the pixel
:param kernel_subgrid: subsampled kernel
:param subsampling_size: size of subsampling PSF in units of image pixels
:return: pixel and subsampling kernel
| 1.818897
| 1.801642
| 1.009578
|
if kernelsize % 2 == 0:
raise ValueError("even pixel number kernel size not supported!")
x_int = int(round(x_pos))
y_int = int(round(y_pos))
n = len(image)
d = (kernelsize - 1)/2
x_max = int(np.minimum(x_int + d + 1, n))
x_min = int(np.maximum(x_int - d, 0))
y_max = int(np.minimum(y_int + d + 1, n))
y_min = int(np.maximum(y_int - d, 0))
image_cut = copy.deepcopy(image[y_min:y_max, x_min:x_max])
shift_x = x_int - x_pos
shift_y = y_int - y_pos
if shift is True:
kernel_shift = de_shift_kernel(image_cut, shift_x, shift_y, iterations=50)
else:
kernel_shift = image_cut
kernel_final = np.zeros((kernelsize, kernelsize))
k_l2_x = int((kernelsize - 1) / 2)
k_l2_y = int((kernelsize - 1) / 2)
xk_min = np.maximum(0, -x_int + k_l2_x)
yk_min = np.maximum(0, -y_int + k_l2_y)
xk_max = np.minimum(kernelsize, -x_int + k_l2_x + n)
yk_max = np.minimum(kernelsize, -y_int + k_l2_y + n)
kernel_final[yk_min:yk_max, xk_min:xk_max] = kernel_shift
return kernel_final
|
def cutout_source(x_pos, y_pos, image, kernelsize, shift=True)
|
cuts out a point source (e.g. PSF estimate) from an image and shifts it to the center of a pixel
:param x_pos:
:param y_pos:
:param image:
:param kernelsize:
:return:
| 1.940074
| 1.967493
| 0.986064
|
n = len(kernel)
if n % 2 == 0:
raise ValueError('only works with odd number of pixels in kernel!')
max_flux = kernel[int((n-1)/2), int((n-1)/2)]
I_2 = max_flux/2.
I_r = kernel[int((n-1)/2), int((n-1)/2):]
r = np.linspace(0, (n-1)/2, int((n + 1) / 2))
for i in range(1, len(r)):
if I_r[i] < I_2:
fwhm_2 = (I_2 - I_r[i-1])/(I_r[i] - I_r[i-1]) + r[i-1]
return fwhm_2 * 2
raise ValueError('The kernel did not drop to half the max value - fwhm not determined!')
|
def fwhm_kernel(kernel)
|
computes the full width at half maximum of a (PSF) kernel
:param kernel: (psf) kernel, 2d numpy array
:return: fwhm in units of pixels
| 3.522759
| 3.426331
| 1.028143
|
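A cross-check with a sampled Gaussian, whose FWHM is 2*sqrt(2*ln 2)*sigma ≈ 2.355*sigma (a sketch; `fwhm_kernel` as defined above is assumed to be in scope):
import numpy as np

sigma = 2.0
x, y = np.meshgrid(np.arange(-10, 11), np.arange(-10, 11))
kernel = np.exp(-(x**2 + y**2) / (2 * sigma**2))
kernel /= np.sum(kernel)
# fwhm_kernel(kernel) should come out close to
# 2 * np.sqrt(2 * np.log(2)) * sigma ≈ 4.71 pixels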
numPix_x, numPix_y = np.shape(data)
#data_center = int((numPix-1.)/2)
x_int = int(round(x_pos-0.49999))#+data_center
y_int = int(round(y_pos-0.49999))#+data_center
if x_int > 2 and x_int < numPix_x-2 and y_int > 2 and y_int < numPix_y-2:
mean_image = max(np.sum(data[y_int-2:y_int+3, x_int-2:x_int+3]), 0)
num = len(psf_kernel)
center = int((num-0.5)/2)
mean_kernel = np.sum(psf_kernel[center-2:center+3, center-2:center+3])
amp_estimated = mean_image/mean_kernel
else:
amp_estimated = 0
return amp_estimated
|
def estimate_amp(data, x_pos, y_pos, psf_kernel)
|
estimates the amplitude of a point source located at x_pos, y_pos
:param data:
:param x_pos:
:param y_pos:
:param deltaPix:
:return:
| 2.398666
| 2.441089
| 0.982621
|
if not hasattr(self, '_light_cdf') or new_compute is True:
r_array = np.linspace(self._min_interpolate, self._max_interpolate, self._interp_grid_num)
cum_sum = np.zeros_like(r_array)
sum = 0
for i, r in enumerate(r_array):
if i == 0:
cum_sum[i] = 0
else:
sum += self.light_2d(r, kwargs_list) * r
cum_sum[i] = copy.deepcopy(sum)
cum_sum_norm = cum_sum/cum_sum[-1]
f = interp1d(cum_sum_norm, r_array)
self._light_cdf = f
cdf_draw = np.random.uniform(0., 1, n)
r_draw = self._light_cdf(cdf_draw)
return r_draw
|
def draw_light_2d_linear(self, kwargs_list, n=1, new_compute=False, r_eff=1.)
|
constructs the CDF and draws from it random realizations of projected radii R
:param kwargs_list:
:return:
| 2.652346
| 2.558081
| 1.03685
|
if not hasattr(self, '_light_cdf_log') or new_compute is True:
r_array = np.logspace(np.log10(self._min_interpolate), np.log10(self._max_interpolate), self._interp_grid_num)
cum_sum = np.zeros_like(r_array)
sum = 0
for i, r in enumerate(r_array):
if i == 0:
cum_sum[i] = 0
else:
sum += self.light_2d(r, kwargs_list) * r * r
cum_sum[i] = copy.deepcopy(sum)
cum_sum_norm = cum_sum/cum_sum[-1]
f = interp1d(cum_sum_norm, np.log(r_array))
self._light_cdf_log = f
cdf_draw = np.random.uniform(0., 1, n)
r_log_draw = self._light_cdf_log(cdf_draw)
return np.exp(r_log_draw)
|
def draw_light_2d(self, kwargs_list, n=1, new_compute=False)
|
constructs the CDF and draws from it random realizations of projected radii R
:param kwargs_list:
:return:
| 2.62179
| 2.559465
| 1.024351
|
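A self-contained sketch of the inverse-CDF draw built above, with a toy profile in place of `self.light_2d`:
import numpy as np
from scipy.interpolate import interp1d

r_array = np.logspace(-3, 2, 1000)
weight = np.exp(-r_array) * r_array * r_array  # toy light_2d(r) * r * r
cum = np.zeros_like(r_array)
cum[1:] = np.cumsum(weight[1:])
cum /= cum[-1]                                 # normalized CDF, cum[0] = 0
inv_cdf = interp1d(cum, np.log(r_array))
r_draw = np.exp(inv_cdf(np.random.uniform(0., 1., 5)))
print(r_draw)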
rho0_input = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs)
if Rs < 0.0000001:
Rs = 0.0000001
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_**2 + y_**2)
f_x, f_y = self.nfwAlpha(R, Rs, rho0_input, x_, y_)
return f_x, f_y
|
def derivatives(self, x, y, Rs, theta_Rs, center_x=0, center_y=0)
|
returns df/dx and df/dy of the function (integral of NFW)
| 3.326046
| 3.02872
| 1.098169
|
return rho0/(R/Rs*(1+R/Rs)**2)
|
def density(self, R, Rs, rho0)
|
three-dimensional NFW profile
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:return: rho(R) density
| 8.230124
| 12.457923
| 0.660634
|
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_**2 + y_**2)
x = R/Rs
Fx = self.F_(x)
return 2*rho0*Rs*Fx
|
def density_2d(self, x, y, Rs, rho0, center_x=0, center_y=0)
|
projected two-dimensional NFW profile (kappa*Sigma_crit)
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param r200: radius of (sub)halo
:type r200: float>0
:return: Epsilon(R) projected density at radius R
| 3.709531
| 4.459777
| 0.831775
|
Rs = float(Rs)
m_3d = 4. * np.pi * rho0 * Rs**3 *(np.log((Rs + R)/Rs) - R/(Rs + R))
return m_3d
|
def mass_3d(self, R, Rs, rho0)
|
mass enclosed in a 3d sphere of radius r
:param r:
:param Ra:
:param Rs:
:return:
| 3.89253
| 4.434577
| 0.877768
|
rho0 = self._alpha2rho0(theta_Rs, Rs)
m_3d = self.mass_3d(R, Rs, rho0)
return m_3d
|
def mass_3d_lens(self, R, Rs, theta_Rs)
|
mass enclosed in a 3d sphere of radius r
:param r:
:param Ra:
:param Rs:
:return:
| 4.13266
| 4.903781
| 0.84275
|
x = R/Rs
gx = self.g_(x)
m_2d = 4*rho0*Rs*R**2*gx/x**2 * np.pi
return m_2d
|
def mass_2d(self, R, Rs, rho0)
|
mass enclosed in a projected 2d sphere of radius r
:param r:
:param Ra:
:param Rs:
:return:
| 6.177757
| 7.463492
| 0.82773
|
if self._interpol:
if not hasattr(self, '_F_interp'):
if self._lookup:
x = self._x_lookup
F_x = self._f_lookup
else:
x = np.linspace(0, self._max_interp_X, self._num_interp_X)
F_x = self._F(x)
self._F_interp = interp.interp1d(x, F_x, kind='linear', axis=-1, copy=False, bounds_error=False,
fill_value=0, assume_sorted=True)
return self._F_interp(X)
else:
return self._F(X)
|
def F_(self, X)
|
computes F()
:param X:
:return:
| 2.70996
| 2.764895
| 0.980131
|
if isinstance(X, int) or isinstance(X, float):
if X < 1 and X > 0:
a = 1/(X**2-1)*(1-2/np.sqrt(1-X**2)*np.arctanh(np.sqrt((1-X)/(1+X))))
elif X == 1:
a = 1./3
elif X > 1:
a = 1/(X**2-1)*(1-2/np.sqrt(X**2-1)*np.arctan(np.sqrt((X-1)/(1+X))))
else: # X == 0:
c = 0.0000001
a = 1/(-1)*(1-2/np.sqrt(1)*np.arctanh(np.sqrt((1-c)/(1+c))))
else:
a = np.empty_like(X)
x = X[(X < 1) & (X > 0)]
a[(X < 1) & (X > 0)] = 1/(x**2-1)*(1-2/np.sqrt(1-x**2)*np.arctanh(np.sqrt((1-x)/(1+x))))
a[X == 1] = 1./3.
x = X[X > 1]
a[X > 1] = 1/(x**2-1)*(1-2/np.sqrt(x**2-1)*np.arctan(np.sqrt((x-1)/(1+x))))
# a[X>y] = 0
c = 0.0000001
a[X == 0] = 1/(-1)*(1-2/np.sqrt(1)*np.arctanh(np.sqrt((1-c)/(1+c))))
return a
|
def _F(self, X)
|
analytic solution of the projection integral
:param x: R/Rs
:type x: float >0
| 1.923286
| 1.950069
| 0.986266
|
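Written out, the piecewise function implemented above (with $x = R/R_s$) is
$$
F(x)=\begin{cases}
\dfrac{1}{x^{2}-1}\left(1-\dfrac{2}{\sqrt{1-x^{2}}}\,\operatorname{artanh}\sqrt{\dfrac{1-x}{1+x}}\right), & 0<x<1,\\
\dfrac{1}{3}, & x=1,\\
\dfrac{1}{x^{2}-1}\left(1-\dfrac{2}{\sqrt{x^{2}-1}}\,\arctan\sqrt{\dfrac{x-1}{x+1}}\right), & x>1,
\end{cases}
$$
with the $x=0$ case evaluated in the code at a small offset $c=10^{-7}$ to avoid the singular limit.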
if self._interpol:
if not hasattr(self, '_g_interp'):
if self._lookup:
x = self._x_lookup
g_x = self._g_lookup
else:
x = np.linspace(0, self._max_interp_X, self._num_interp_X)
g_x = self._g(x)
self._g_interp = interp.interp1d(x, g_x, kind='linear', axis=-1, copy=False, bounds_error=False,
fill_value=0, assume_sorted=True)
return self._g_interp(X)
else:
return self._g(X)
|
def g_(self, X)
|
computes g()
:param X:
:return:
| 2.635338
| 2.697693
| 0.976886
|
if self._interpol:
if not hasattr(self, '_h_interp'):
if self._lookup:
x = self._x_lookup
h_x = self._h_lookup
else:
x = np.linspace(0, self._max_interp_X, self._num_interp_X)
h_x = self._h(x)
self._h_interp = interp.interp1d(x, h_x, kind='linear', axis=-1, copy=False, bounds_error=False,
fill_value=0, assume_sorted=True)
return self._h_interp(X)
else:
return self._h(X)
|
def h_(self, X)
|
computes h()
:param X:
:return:
| 2.684128
| 2.676466
| 1.002863
|
rho0 = theta_Rs / (4. * Rs ** 2 * (1. + np.log(1. / 2.)))
return rho0
|
def _alpha2rho0(self, theta_Rs, Rs)
|
convert angle at Rs into rho0
| 4.883181
| 4.4611
| 1.094614
|
theta_Rs = rho0 * (4 * Rs ** 2 * (1 + np.log(1. / 2.)))
return theta_Rs
|
def _rho02alpha(self, rho0, Rs)
|
convert rho0 to angle at Rs
:param rho0:
:param Rs:
:return:
| 7.526702
| 7.230074
| 1.041027
|
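A round-trip check that the two converters above are mutual inverses:
import numpy as np

Rs, theta_Rs = 1.5, 0.8
rho0 = theta_Rs / (4. * Rs**2 * (1. + np.log(1. / 2.)))   # _alpha2rho0
theta_back = rho0 * (4 * Rs**2 * (1 + np.log(1. / 2.)))   # _rho02alpha
assert np.isclose(theta_back, theta_Rs)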
name_list = []
for func in self.func_list:
name_list.append(func.param_names)
return name_list
|
def param_name_list(self)
|
returns the list of all parameter names
:return: list of list of strings (for each light model separately)
| 3.580527
| 3.344923
| 1.070436
|
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
flux = np.zeros_like(x)
for i, func in enumerate(self.func_list):
if k is None or k == i:
out = np.array(func.function(x, y, **kwargs_list[i]), dtype=float)
flux += out
return flux
|
def surface_brightness(self, x, y, kwargs_list, k=None)
|
:param x: coordinate in units of arcsec relative to the center of the image
:type x: set or single 1d numpy array
| 2.338593
| 2.622224
| 0.891836
|
r = np.array(r, dtype=float)
flux = np.zeros_like(r)
for i, func in enumerate(self.func_list):
if k is None or k == i:
kwargs = {k: v for k, v in kwargs_list[i].items() if k not in ['center_x', 'center_y']}
if self.profile_type_list[i] in ['HERNQUIST', 'HERNQUIST_ELLIPSE', 'PJAFFE', 'PJAFFE_ELLIPSE',
'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'MULTI_GAUSSIAN',
'MULTI_GAUSSIAN_ELLIPSE', 'POWER_LAW']:
flux += func.light_3d(r, **kwargs)
else:
raise ValueError('Light model %s does not support a 3d light distribution!'
% self.profile_type_list[i])
return flux
|
def light_3d(self, r, kwargs_list, k=None)
|
computes 3d density at radius r
:param x: coordinate in units of arcsec relative to the center of the image
:type x: set or single 1d numpy array
| 2.664351
| 2.915771
| 0.913772
|
norm_flux_list = []
for i, model in enumerate(self.profile_type_list):
if k is None or k == i:
if model in ['SERSIC', 'SERSIC_ELLIPSE', 'INTERPOL', 'GAUSSIAN', 'GAUSSIAN_ELLIPSE',
'MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE']:
kwargs_new = kwargs_list[i].copy()
if norm is True:
if model in ['MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE']:
new = {'amp': np.array(kwargs_new['amp'])/kwargs_new['amp'][0]}
else:
new = {'amp': 1}
kwargs_new.update(new)
norm_flux = self.func_list[i].total_flux(**kwargs_new)
norm_flux_list.append(norm_flux)
else:
raise ValueError("profile %s does not support flux normlization." % model)
# TODO implement total flux for e.g. 'HERNQUIST', 'HERNQUIST_ELLIPSE', 'PJAFFE', 'PJAFFE_ELLIPSE',
# 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'POWER_LAW', 'NIE', 'CHAMELEON', 'DOUBLE_CHAMELEON', 'UNIFORM'
return norm_flux_list
|
def total_flux(self, kwargs_list, norm=False, k=None)
|
Computes the total flux of each individual light profile. This allows one to estimate the total flux as
well as lenstronomy amp-to-magnitude conversions. Not all models are supported.
:param kwargs_list: list of keyword arguments corresponding to the light profiles. The 'amp' parameter can be missing.
:param norm: bool, if True, computes the flux for amp=1
:param k: int, if set, only evaluates the specific light model
:return: list of (total) flux values attributed to each profile
| 2.98474
| 2.759187
| 1.081746
|
bn = self.b_n(n)
k = bn*Re**(-1./n)
return k, bn
|
def k_bn(self, n, Re)
|
returns the normalisation of the Sersic profile such that Re is the half-light radius for a given n_sersic slope
| 8.016342
| 6.471511
| 1.238713
|
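b_n is defined by Gamma(2n) = 2*gamma(2n, b_n), i.e. the regularized lower incomplete gamma evaluated at b_n equals 1/2; a quick check with the common approximation b_n ≈ 1.9992 n − 0.3271 (an assumption; the row does not show which b_n the class uses):
import scipy.special as special

n = 4.0
bn = 1.9992 * n - 0.3271
print(special.gammainc(2 * n, bn))  # ≈ 0.5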
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
if isinstance(r, int) or isinstance(r, float):
r = max(self._s, r)
else:
r[r < self._s] = self._s
x_reduced = (r/r_eff)**(1./n_sersic)
return x_reduced
|
def _x_reduced(self, x, y, n_sersic, r_eff, center_x, center_y)
|
coordinate transform to normalized radius
:param x:
:param y:
:param center_x:
:param center_y:
:return:
| 2.378657
| 2.495155
| 0.95331
|
b = self.b_n(n_sersic)
alpha_eff = n_sersic * r_eff * k_eff * b**(-2*n_sersic) * np.exp(b) * special.gamma(2*n_sersic)
return -alpha_eff
|
def _alpha_eff(self, r_eff, n_sersic, k_eff)
|
deflection angle at r_eff
:param r_eff:
:param n_sersic:
:param k_eff:
:return:
| 4.018836
| 4.55539
| 0.882216
|
raise ValueError("not implemented! Use a Multi-Gaussian-component decomposition.")
|
def density(self, x, y, n_sersic, r_eff, k_eff, center_x=0, center_y=0)
|
de-projection of the Sersic profile based on
Prugniel & Simien (1997)
:return:
| 133.552673
| 141.96077
| 0.940772
|
bn = self.b_n(n_sersic)
return I_eff * r_eff**2 * 2 * np.pi * n_sersic * np.exp(bn) / bn**(2*n_sersic) * scipy.special.gamma(2*n_sersic)
|
def _total_flux(self, r_eff, I_eff, n_sersic)
|
computes total flux of a Sersic profile
:param r_eff: projected half light radius
:param I_eff: surface brightness at r_eff (in same units as r_eff)
:param n_sersic: Sersic index
:return: integrated flux to infinity
| 4.350215
| 4.766006
| 0.912759
|
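A numeric cross-check of the closed form above for n_sersic = 1 (a sketch; b_n is obtained exactly from the half-light condition via `gammaincinv`):
import numpy as np
import scipy.special as special
from scipy import integrate

r_eff, I_eff, n = 2.0, 3.0, 1.0
bn = special.gammaincinv(2 * n, 0.5)
analytic = I_eff * r_eff**2 * 2 * np.pi * n * np.exp(bn) / bn**(2 * n) \
    * special.gamma(2 * n)
numeric, _ = integrate.quad(
    lambda r: 2 * np.pi * r * I_eff * np.exp(-bn * ((r / r_eff)**(1. / n) - 1.)),
    0, np.inf)
assert np.isclose(analytic, numeric)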
gamma = self._gamma_limit(gamma)
x_ = x - center_x
y_ = y - center_y
E = theta_E / ((3. - gamma) / 2.) ** (1. / (1. - gamma))
# E = phi_E_spp
eta = -gamma + 3
p2 = x_**2+y_**2
s2 = 0. # softening
return 2 * E**2/eta**2 * ((p2 + s2)/E**2)**(eta/2)
|
def function(self, x, y, theta_E, gamma, center_x=0, center_y=0)
|
:param x: set of x-coordinates
:type x: array of size (n)
:param theta_E: Einstein radius of lens
:type theta_E: float
:param gamma: power-law slope of mass profile
:type gamma: <2 float
:param q: axis ratio
:type q: 0<q<1
:param phi_G: position angle of SES
:type phi_G: 0<phi_G<pi/2
:returns: function
:raises: AttributeError, KeyError
| 6.828287
| 7.442542
| 0.917467
|
fac = np.sqrt(np.pi) * special.gamma(1. / 2 * (-1 + gamma)) / special.gamma(gamma / 2.) * 2 / (3 - gamma) * rho0
#fac = theta_E**(gamma - 1)
theta_E = fac**(1. / (gamma - 1))
return theta_E
|
def rho2theta(self, rho0, gamma)
|
converts 3d density into 2d projected density parameter
:param rho0:
:param gamma:
:return:
| 5.704965
| 5.957314
| 0.957641
|
fac1 = np.sqrt(np.pi) * special.gamma(1. / 2 * (-1 + gamma)) / special.gamma(gamma / 2.) * 2 / (3 - gamma)
fac2 = theta_E**(gamma - 1)
rho0 = fac2 / fac1
return rho0
|
def theta2rho(self, theta_E, gamma)
|
converts projected density parameter (in units of deflection) into 3d density parameter
:param theta_E:
:param gamma:
:return:
| 4.845932
| 5.132892
| 0.944094
|
mass_3d = 4 * np.pi * rho0 /(-gamma + 3) * r ** (-gamma + 3)
return mass_3d
|
def mass_3d(self, r, rho0, gamma)
|
mass enclosed in a 3d sphere of radius r
:param r:
:param a:
:param s:
:return:
| 5.229198
| 7.366728
| 0.70984
|
alpha = np.sqrt(np.pi) * special.gamma(1. / 2 * (-1 + gamma)) / special.gamma(gamma / 2.) * r ** (2 - gamma)/(3 - gamma) *np.pi * 2 * rho0
mass_2d = alpha*r
return mass_2d
|
def mass_2d(self, r, rho0, gamma)
|
mass enclosed in a projected 2d sphere of radius r
:param r:
:param rho0:
:param a:
:param s:
:return:
| 5.961955
| 6.5123
| 0.915491
|
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
mass_3d = self.mass_3d(r, rho0, gamma)
pot = mass_3d/r
return pot
|
def grav_pot(self, x, y, rho0, gamma, center_x=0, center_y=0)
|
gravitational potential (modulo 4 pi G and rho0 in appropriate units)
:param x:
:param y:
:param rho0:
:param a:
:param s:
:param center_x:
:param center_y:
:return:
| 2.779139
| 3.297537
| 0.842792
|