Dataset schema: _id (string, 2-7 chars), title (string, 1-88 chars), partition (string, 3 classes), text (string, 75-19.8k chars), language (string, 1 class), meta_information (dict)
q14200
_explode_lines
train
def _explode_lines(shape):
    """
    Return a list of LineStrings which make up the shape.
    """
    if shape.geom_type == 'LineString':
        return [shape]
    elif shape.geom_type == 'MultiLineString':
        # wrap in list() so the return type matches the docstring
        return list(shape.geoms)
    elif shape.geom_type == 'GeometryCollection':
        lines = []
        for geom in shape.geoms:
            lines.extend(_explode_lines(geom))
        return lines
    return []
python
{ "resource": "" }
q14201
_orient
train
def _orient(shape):
    """
    The Shapely version of the orient function appears to only work on
    Polygons, and fails on MultiPolygons. This is a quick wrapper to allow
    orienting of either.
    """
    assert shape.geom_type in ('Polygon', 'MultiPolygon')
    if shape.geom_type == 'Polygon':
        return orient(shape)
    else:
        polys = []
        for geom in shape.geoms:
            polys.append(orient(geom))
        return MultiPolygon(polys)
python
{ "resource": "" }
q14202
GeomEncoder.parseGeometry
train
def parseGeometry(self, geometry):
    """
    A factory method for creating objects of the correct OpenGIS type.
    """
    self.coordinates = []
    self.index = []
    self.position = 0
    self.lastX = 0
    self.lastY = 0
    self.isPoly = False
    self.isPoint = True
    self.dropped = 0
    self.first = True

    # Used for exception strings
    self._current_string = geometry

    reader = _ExtendedUnPacker(geometry)

    # Start the parsing
    self._dispatchNextType(reader)
python
{ "resource": "" }
q14203
calc_buffered_bounds
train
def calc_buffered_bounds(
        format, bounds, meters_per_pixel_dim, layer_name, geometry_type,
        buffer_cfg):
    """
    Calculate the buffered bounds per format per layer based on config.
    """
    if not buffer_cfg:
        return bounds

    format_buffer_cfg = buffer_cfg.get(format.extension)
    if format_buffer_cfg is None:
        return bounds

    geometry_type = normalize_geometry_type(geometry_type)

    per_layer_cfg = format_buffer_cfg.get('layer', {}).get(layer_name)
    if per_layer_cfg is not None:
        layer_geom_pixels = per_layer_cfg.get(geometry_type)
        if layer_geom_pixels is not None:
            assert isinstance(layer_geom_pixels, Number)
            return bounds_buffer(
                bounds, meters_per_pixel_dim * layer_geom_pixels)

    by_geometry_pixels = format_buffer_cfg.get('geometry', {}).get(
        geometry_type)
    if by_geometry_pixels is not None:
        assert isinstance(by_geometry_pixels, Number)
        return bounds_buffer(
            bounds, meters_per_pixel_dim * by_geometry_pixels)

    return bounds
python
{ "resource": "" }
q14204
_intersect_multipolygon
train
def _intersect_multipolygon(shape, tile_bounds, clip_bounds):
    """
    Return the parts of the MultiPolygon shape which overlap the
    tile_bounds, each clipped to the clip_bounds. This can be used to
    extract only the parts of a multipolygon which are actually visible in
    the tile, while keeping those parts which extend beyond the tile
    clipped to avoid huge polygons.
    """
    polys = []
    for poly in shape.geoms:
        if tile_bounds.intersects(poly):
            if not clip_bounds.contains(poly):
                poly = clip_bounds.intersection(poly)
                # the intersection operation can make the resulting polygon
                # invalid. including it in a MultiPolygon would make that
                # invalid too. instead, we skip it, and hope it wasn't too
                # important.
                if not poly.is_valid:
                    continue
            if poly.type == 'Polygon':
                polys.append(poly)
            elif poly.type == 'MultiPolygon':
                polys.extend(poly.geoms)
    return geometry.MultiPolygon(polys)
python
{ "resource": "" }
q14205
_clip_shape
train
def _clip_shape(shape, buffer_padded_bounds, is_clipped, clip_factor):
    """
    Return the shape clipped to a clip_factor expansion of
    buffer_padded_bounds if is_clipped is True. Otherwise return the
    original shape, or None if the shape does not intersect
    buffer_padded_bounds at all.

    This is used to reduce the size of the geometries which are encoded in
    the tiles by removing things which aren't in the tile, and clipping
    those which are to the clip_factor expanded bounding box.
    """
    shape_buf_bounds = geometry.box(*buffer_padded_bounds)

    if not shape_buf_bounds.intersects(shape):
        return None

    if is_clipped:
        # now we know that we should include the geometry, but
        # if the geometry should be clipped, we'll clip to the
        # layer-specific padded bounds
        layer_padded_bounds = calculate_padded_bounds(
            clip_factor, buffer_padded_bounds)

        if shape.type == 'MultiPolygon':
            shape = _intersect_multipolygon(
                shape, shape_buf_bounds, layer_padded_bounds)
        else:
            try:
                shape = shape.intersection(layer_padded_bounds)
            except shapely.errors.TopologicalError:
                return None

    return shape
python
{ "resource": "" }
q14206
BasicIDTokenHandler.now
train
def now(self):
    """
    Capture time.
    """
    if self._now is None:
        # Compute the current time only once per instance
        self._now = datetime.utcnow()
    return self._now
python
{ "resource": "" }
q14207
BasicIDTokenHandler.claim_exp
train
def claim_exp(self, data):
    """
    Required expiration time.
    """
    expiration = getattr(settings, 'OAUTH_ID_TOKEN_EXPIRATION', 30)
    expires = self.now + timedelta(seconds=expiration)
    return timegm(expires.utctimetuple())
python
{ "resource": "" }
q14208
Command._clean_required_args
train
def _clean_required_args(self, url, redirect_uri, client_type):
    """
    Validate and clean the command's arguments.

    Arguments:
        url (str): Client's application URL.
        redirect_uri (str): Client application's OAuth2 callback URI.
        client_type (str): Client's type, indicating whether the Client
            application is capable of maintaining the confidentiality of
            its credentials (e.g., running on a secure server) or is
            incapable of doing so (e.g., running in a browser).

    Raises:
        CommandError, if the URLs provided are invalid, or if the client
        type provided is invalid.
    """
    # Validate URLs
    for url_to_validate in (url, redirect_uri):
        try:
            URLValidator()(url_to_validate)
        except ValidationError:
            raise CommandError("URLs provided are invalid. Please provide valid application and redirect URLs.")

    # Validate and map client type to the appropriate django-oauth2-provider constant
    client_type = client_type.lower()
    client_type = {
        'confidential': CONFIDENTIAL,
        'public': PUBLIC,
    }.get(client_type)

    if client_type is None:
        raise CommandError("Client type provided is invalid. Please use one of 'confidential' or 'public'.")

    self.fields = {  # pylint: disable=attribute-defined-outside-init
        'url': url,
        'redirect_uri': redirect_uri,
        'client_type': client_type,
    }
python
{ "resource": "" }
q14209
Command._parse_options
train
def _parse_options(self, options):
    """Parse the command's options.

    Arguments:
        options (dict): Options with which the command was called.

    Raises:
        CommandError, if a user matching the provided username does not exist.
    """
    for key in ('username', 'client_name', 'client_id', 'client_secret', 'trusted', 'logout_uri'):
        value = options.get(key)
        if value is not None:
            self.fields[key] = value

    username = self.fields.pop('username', None)
    if username is not None:
        try:
            user_model = get_user_model()
            self.fields['user'] = user_model.objects.get(username=username)
        except user_model.DoesNotExist:
            raise CommandError("User matching the provided username does not exist.")

    # The keyword argument 'name' conflicts with that of `call_command()`. We instead
    # use 'client_name' up to this point, then swap it out for the expected field, 'name'.
    client_name = self.fields.pop('client_name', None)
    if client_name is not None:
        self.fields['name'] = client_name

    logout_uri = self.fields.get('logout_uri')
    if logout_uri:
        try:
            URLValidator()(logout_uri)
        except ValidationError:
            raise CommandError("The logout_uri is invalid.")
python
{ "resource": "" }
q14210
AccessTokenView.access_token_response_data
train
def access_token_response_data(self, access_token, response_type=None, nonce=''):
    """
    Return `access_token` fields for OAuth2, and add `id_token` fields for
    OpenID Connect according to the `access_token` scope.
    """
    # Clear the scope for requests that do not use OpenID Connect.
    # Scopes for pure OAuth2 requests are currently not supported.
    scope = constants.DEFAULT_SCOPE
    extra_data = {}

    # Add OpenID Connect `id_token` if requested.
    #
    # TODO: Unfortunately, because of how django-oauth2-provider implements
    # scopes, we cannot check if `openid` is the first scope to be
    # requested, as required by the OpenID Connect specification.
    if provider.scope.check(constants.OPEN_ID_SCOPE, access_token.scope):
        id_token = self.get_id_token(access_token, nonce)
        extra_data['id_token'] = self.encode_id_token(id_token)
        scope = provider.scope.to_int(*id_token.scopes)

    # Update the token scope, so it includes only authorized values.
    access_token.scope = scope
    access_token.save()

    # Get the main fields for the OAuth2 response.
    response_data = super(AccessTokenView, self).access_token_response_data(access_token)

    # Add any additional fields if OpenID Connect is requested. The order of
    # the addition makes sure the OAuth2 values are not overridden.
    response_data = dict(extra_data.items() + response_data.items())

    return response_data
python
{ "resource": "" }
q14211
AccessTokenView.get_id_token
train
def get_id_token(self, access_token, nonce):
    """
    Return an ID token for the given Access Token.
    """
    claims_string = self.request.POST.get('claims')
    claims_request = json.loads(claims_string) if claims_string else {}
    return oidc.id_token(access_token, nonce, claims_request)
python
{ "resource": "" }
q14212
AccessTokenView.encode_id_token
train
def encode_id_token(self, id_token):
    """
    Return encoded ID token.
    """
    # Encode the ID token using the `client_secret`.
    #
    # TODO: Using the `client_secret` is not ideal, since it is transmitted
    # over the wire in some authentication flows. A better alternative is
    # to use the public key of the issuer, which also allows the ID token
    # to be shared among clients. Doing so however adds some operational
    # costs. We should consider this for the future.
    secret = id_token.access_token.client.client_secret
    return id_token.encode(secret)
python
{ "resource": "" }
q14213
UserInfoView.get
train
def get(self, request, *_args, **_kwargs):
    """
    Respond to a UserInfo request.

    Two optional query parameters are accepted, scope and claims. See the
    references above for more details.
    """
    access_token = self.access_token

    scope_string = request.GET.get('scope')
    scope_request = scope_string.split() if scope_string else None

    claims_string = request.GET.get('claims')
    claims_request = json.loads(claims_string) if claims_string else None

    if not provider.scope.check(constants.OPEN_ID_SCOPE, access_token.scope):
        return self._bad_request('Missing openid scope.')

    try:
        claims = self.userinfo_claims(access_token, scope_request, claims_request)
    except ValueError as exception:
        return self._bad_request(str(exception))

    # TODO: Encode and sign responses if requested.

    response = JsonResponse(claims)
    return response
python
{ "resource": "" }
q14214
UserInfoView.userinfo_claims
train
def userinfo_claims(self, access_token, scope_request, claims_request):
    """
    Return the claims for the requested parameters.
    """
    id_token = oidc.userinfo(access_token, scope_request, claims_request)
    return id_token.claims
python
{ "resource": "" }
q14215
collect
train
def collect(handlers, access_token, scope_request=None, claims_request=None):
    """
    Collect all the claims values from the `handlers`.

    Arguments:
        handlers (list): List of claim :class:`Handler` classes.
        access_token (:class:AccessToken): Associated access token.
        scope_request (list): List of requested scopes.
        claims_request (dict): Dictionary with only the relevant section of
            an OpenID Connect claims request.

    Returns a list of the scopes from `scope_request` that are authorized,
    and a dictionary of the claims associated with the authorized scopes in
    `scope_request`, and additionally, the authorized claims listed in
    `claims_request`.
    """
    user = access_token.user
    client = access_token.client

    # Instantiate handlers. Each handler is instantiated only once, allowing
    # the handler to keep state in-between calls to its scope and claim
    # methods.
    handlers = [cls() for cls in handlers]

    # Find all authorized scopes by including the access_token scopes. Note
    # that the handlers determine if a scope is authorized, not its presence
    # in the access_token.
    required_scopes = set(REQUIRED_SCOPES)
    token_scopes = set(provider.scope.to_names(access_token.scope))
    authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)

    # Select only the authorized scopes from the requested scopes.
    scope_request = set(scope_request) if scope_request else set()
    scopes = required_scopes | (authorized_scopes & scope_request)

    # Find all authorized claims names for the authorized_scopes.
    authorized_names = _collect_names(handlers, authorized_scopes, user, client)

    # Select only the requested claims if no scope has been requested.
    # Selecting scopes takes precedence over selecting claims.
    claims_request = _validate_claim_request(claims_request)

    # Add the requested claims that are authorized to the response.
    requested_names = set(claims_request.keys()) & authorized_names
    names = _collect_names(handlers, scopes, user, client) | requested_names

    # Get the values for the claims.
    claims = _collect_values(
        handlers,
        names=names,
        user=user,
        client=client,
        values=claims_request or {},
    )

    return authorized_scopes, claims
python
{ "resource": "" }
q14216
_collect_scopes
train
def _collect_scopes(handlers, scopes, user, client):
    """ Get a set of all the authorized scopes according to the handlers. """
    results = set()
    data = {'user': user, 'client': client}

    def visitor(scope_name, func):
        claim_names = func(data)
        # If claim_names is None, it means that the scope is not authorized.
        if claim_names is not None:
            results.add(scope_name)

    _visit_handlers(handlers, visitor, 'scope', scopes)

    return results
python
{ "resource": "" }
q14217
_collect_names
train
def _collect_names(handlers, scopes, user, client):
    """ Get the names of the claims supported by the handlers for the
    requested scope. """
    results = set()
    data = {'user': user, 'client': client}

    def visitor(_scope_name, func):
        claim_names = func(data)
        # If claim_names is None, it means that the scope is not authorized.
        if claim_names is not None:
            results.update(claim_names)

    _visit_handlers(handlers, visitor, 'scope', scopes)

    return results
python
{ "resource": "" }
q14218
_collect_values
train
def _collect_values(handlers, names, user, client, values):
    """ Get the values from the handlers of the requested claims. """
    results = {}

    def visitor(claim_name, func):
        data = {'user': user, 'client': client}
        data.update(values.get(claim_name) or {})
        claim_value = func(data)
        # If claim_value is None, it means that the claim is not authorized.
        if claim_value is not None:
            # New values overwrite previous results.
            results[claim_name] = claim_value

    _visit_handlers(handlers, visitor, 'claim', names)

    return results
python
{ "resource": "" }
q14219
_validate_claim_values
train
def _validate_claim_values(name, value, ignore_errors):
    """ Helper for `validate_claim_request` """
    results = {'essential': False}
    # use a distinct loop variable so the `value` argument is not shadowed
    for key, val in value.iteritems():
        if key in CLAIM_REQUEST_FIELDS:
            results[key] = val
        else:
            if not ignore_errors:
                msg = 'Unknown attribute {} in claim value {}.'.format(key, name)
                raise ValueError(msg)
    return results
python
{ "resource": "" }
q14220
_visit_handlers
train
def _visit_handlers(handlers, visitor, prefix, suffixes):
    """ Use the visitor pattern to collect information from handlers. """
    results = []
    for handler in handlers:
        for suffix in suffixes:
            func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
            if func:
                results.append(visitor(suffix, func))
    return results
python
{ "resource": "" }
q14221
CreateView.update_model
train
def update_model(self, model):
    """
    Trivial implementation for simple data in the form, using the model
    prefix.
    """
    for k, v in self.parse_form().items():
        setattr(model, k, v)
python
{ "resource": "" }
q14222
locale_negotiator
train
def locale_negotiator(request):
    """Locale negotiator based on the `Accept-Language` header."""
    locale = 'en'
    if request.accept_language:
        locale = request.accept_language.best_match(LANGUAGES)
    locale = LANGUAGES.get(locale, 'en')
    return locale
python
{ "resource": "" }
q14223
main
train
def main(global_config, **settings):
    """ Get a PyShop WSGI application configured with settings. """
    if sys.version_info[0] < 3:
        reload(sys)
        sys.setdefaultencoding('utf-8')

    settings = dict(settings)

    # Scoping sessions for Pyramid ensures sessions are committed or rolled
    # back after the template has been rendered
    create_engine(settings, scoped=True)

    authn_policy = RouteSwitchAuthPolicy(secret=settings['pyshop.cookie_key'],
                                         callback=groupfinder)
    authz_policy = ACLPolicy()
    route_prefix = settings.get('pyshop.route_prefix')

    config = Configurator(settings=settings,
                          root_factory=RootFactory,
                          route_prefix=route_prefix,
                          locale_negotiator=locale_negotiator,
                          authentication_policy=authn_policy,
                          authorization_policy=authz_policy)

    config.end()
    return config.make_wsgi_app()
python
{ "resource": "" }
q14224
User.by_login
train
def by_login(cls, session, login, local=True):
    """
    Get a user from a given login.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param login: the user login
    :type login: unicode
    :return: the associated user
    :rtype: :class:`pyshop.models.User`
    """
    user = cls.first(session,
                     where=((cls.login == login),
                            (cls.local == local),)
                     )
    # XXX it appears that this query is not case-sensitive!
    return user if user and user.login == login else None
python
{ "resource": "" }
q14225
User.by_credentials
train
def by_credentials(cls, session, login, password):
    """
    Get a user from given credentials.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param login: username
    :type login: unicode
    :param password: user password
    :type password: unicode
    :return: associated user
    :rtype: :class:`pyshop.models.User`
    """
    user = cls.by_login(session, login, local=True)
    if not user:
        return None
    if crypt.check(user.password, password):
        return user
python
{ "resource": "" }
q14226
User.get_locals
train
def get_locals(cls, session, **kwargs):
    """
    Get all local users.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :return: local users
    :rtype: generator of :class:`pyshop.models.User`
    """
    return cls.find(session,
                    where=(cls.local == True,),
                    order_by=cls.login,
                    **kwargs)
python
{ "resource": "" }
q14227
User.validate
train
def validate(self, session):
    """
    Validate that the current user can be saved.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :return: ``True``
    :rtype: bool
    :raise: :class:`pyshop.helpers.sqla.ModelError` if user is not valid
    """
    errors = []
    if not self.login:
        errors.append(u'login is required')
    else:
        other = User.by_login(session, self.login)
        if other and other.id != self.id:
            errors.append(u'duplicate login %s' % self.login)
    if not self.password:
        errors.append(u'password is required')
    if not self.email:
        errors.append(u'email is required')
    elif not re_email.match(self.email):
        errors.append(u'%s is not a valid email' % self.email)

    if len(errors):
        raise ModelError(errors)
    return True
python
{ "resource": "" }
q14228
Classifier.by_name
train
def by_name(cls, session, name, **kwargs):
    """
    Get a classifier from a given name.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param name: name of the classifier
    :type name: unicode
    :return: classifier instance
    :rtype: :class:`pyshop.models.Classifier`
    """
    classifier = cls.first(session, where=(cls.name == name,))

    if not kwargs.get('create_if_not_exists', False):
        return classifier

    if not classifier:
        splitted_names = [n.strip() for n in name.split(u'::')]
        classifiers = [u' :: '.join(splitted_names[:i + 1])
                       for i in range(len(splitted_names))]
        parent_id = None
        category = splitted_names[0]
        for c in classifiers:
            classifier = cls.first(session, where=(cls.name == c,))
            if not classifier:
                classifier = Classifier(name=c, parent_id=parent_id,
                                        category=category)
                session.add(classifier)
                session.flush()
            parent_id = classifier.id

    return classifier
python
{ "resource": "" }
q14229
Package.sorted_releases
train
def sorted_releases(self):
    """
    Releases sorted by version.
    """
    releases = [(parse_version(release.version), release)
                for release in self.releases]
    releases.sort(reverse=True)
    return [release[1] for release in releases]
python
{ "resource": "" }
q14230
Package.by_filter
train
def by_filter(cls, session, opts, **kwargs):
    """
    Get packages from given filters.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param opts: filtering options
    :type opts: dict
    :return: package instances
    :rtype: generator of :class:`pyshop.models.Package`
    """
    where = []

    if opts.get('local_only'):
        where.append(cls.local == True)

    if opts.get('names'):
        where.append(cls.name.in_(opts['names']))

    if opts.get('classifiers'):
        ids = [c.id for c in opts.get('classifiers')]
        cls_pkg = classifier__package
        qry = session.query(cls_pkg.c.package_id, func.count('*'))
        qry = qry.filter(cls_pkg.c.classifier_id.in_(ids))
        qry = qry.group_by(cls_pkg.c.package_id)
        qry = qry.having(func.count('*') >= len(ids))
        where.append(cls.id.in_([r[0] for r in qry.all()]))

    return cls.find(session, where=where, **kwargs)
python
{ "resource": "" }
q14231
Package.by_owner
train
def by_owner(cls, session, owner_name):
    """
    Get packages from a given owner username.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param owner_name: owner username
    :type owner_name: unicode
    :return: package instances
    :rtype: generator of :class:`pyshop.models.Package`
    """
    return cls.find(session,
                    join=(cls.owners),
                    where=(User.login == owner_name,),
                    order_by=cls.name)
python
{ "resource": "" }
q14232
Package.by_maintainer
train
def by_maintainer(cls, session, maintainer_name):
    """
    Get packages from a given maintainer username.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param maintainer_name: maintainer username
    :type maintainer_name: unicode
    :return: package instances
    :rtype: generator of :class:`pyshop.models.Package`
    """
    return cls.find(session,
                    join=(cls.maintainers),
                    where=(User.login == maintainer_name,),
                    order_by=cls.name)
python
{ "resource": "" }
q14233
Package.get_locals
train
def get_locals(cls, session):
    """
    Get all local packages.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :return: package instances
    :rtype: generator of :class:`pyshop.models.Package`
    """
    return cls.find(session, where=(cls.local == True,))
python
{ "resource": "" }
q14234
Package.get_mirrored
train
def get_mirrored(cls, session):
    """
    Get all mirrored packages.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :return: package instances
    :rtype: generator of :class:`pyshop.models.Package`
    """
    return cls.find(session, where=(cls.local == False,))
python
{ "resource": "" }
q14235
Release.by_version
train
def by_version(cls, session, package_name, version):
    """
    Get the release of a given package for a given version.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param package_name: package name
    :type package_name: unicode
    :param version: version
    :type version: unicode
    :return: release instance
    :rtype: :class:`pyshop.models.Release`
    """
    return cls.first(session,
                     join=(Package,),
                     where=((Package.name == package_name),
                            (cls.version == version)))
python
{ "resource": "" }
q14236
Release.by_classifiers
train
def by_classifiers(cls, session, classifiers):
    """
    Get releases for given classifiers.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param classifiers: classifiers
    :type classifiers: unicode
    :return: release instances
    :rtype: generator of :class:`pyshop.models.Release`
    """
    return cls.find(session,
                    join=(cls.classifiers,),
                    where=(Classifier.name.in_(classifiers),),
                    )
python
{ "resource": "" }
q14237
Release.search
train
def search(cls, session, opts, operator):
    """
    Get releases for given filters.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param opts: filtering options
    :type opts: dict
    :param operator: filtering options joining operator (`and` or `or`)
    :type operator: basestring
    :return: release instances
    :rtype: generator of :class:`pyshop.models.Release`
    """
    available = {'name': Package.name,
                 'version': cls.version,
                 'author': User.login,
                 'author_email': User.email,
                 'maintainer': User.login,
                 'maintainer_email': User.email,
                 'home_page': cls.home_page,
                 'license': cls.license,
                 'summary': cls.summary,
                 'description': cls.description,
                 'keywords': cls.keywords,
                 'platform': cls.platform,
                 'download_url': cls.download_url,
                 }
    oper = {'or': or_, 'and': and_}
    join_map = {'name': Package,
                'author': cls.author,
                'author_email': cls.author,
                'maintainer': cls.maintainer,
                'maintainer_email': cls.maintainer,
                }

    where = []
    join = []
    for opt, val in opts.items():
        field = available[opt]
        if hasattr(val, '__iter__') and len(val) > 1:
            stmt = or_(*[field.like(u'%%%s%%' % v) for v in val])
        else:
            stmt = field.like(u'%%%s%%' % val)
        where.append(stmt)
        if opt in join_map:
            join.append(join_map[opt])

    return cls.find(session, join=join, where=(oper[operator](*where),))
python
{ "resource": "" }
q14238
ReleaseFile.by_release
train
def by_release(cls, session, package_name, version):
    """
    Get release files for a given package name and for a given version.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param package_name: package name
    :type package_name: unicode
    :param version: version
    :type version: unicode
    :return: release files
    :rtype: generator of :class:`pyshop.models.ReleaseFile`
    """
    return cls.find(session,
                    join=(Release, Package),
                    where=(Package.name == package_name,
                           Release.version == version,
                           ))
python
{ "resource": "" }
q14239
ReleaseFile.by_filename
train
def by_filename(cls, session, release, filename):
    """
    Get a release file for a given release and a given filename.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param release: release
    :type release: :class:`pyshop.models.Release`
    :param filename: filename of the release file
    :type filename: unicode
    :return: release file
    :rtype: :class:`pyshop.models.ReleaseFile`
    """
    return cls.first(session,
                     where=(ReleaseFile.release_id == release.id,
                            ReleaseFile.filename == filename,
                            ))
python
{ "resource": "" }
q14240
add_urlhelpers
train
def add_urlhelpers(event):
    """
    Add helpers to the template engine.
    """
    event['static_url'] = lambda x: static_path(x, event['request'])
    event['route_url'] = lambda name, *args, **kwargs: \
        route_path(name, event['request'], *args, **kwargs)
    event['parse_rest'] = parse_rest
    event['has_permission'] = event['request'].has_permission
python
{ "resource": "" }
q14241
list_packages
train
def list_packages(request):
    """
    Retrieve a list of the package names registered with the package index.
    Returns a list of name strings.
    """
    session = DBSession()
    names = [p.name for p in Package.all(session, order_by=Package.name)]
    return names
python
{ "resource": "" }
q14242
package_releases
train
def package_releases(request, package_name, show_hidden=False):
    """
    Retrieve a list of the releases registered for the given package_name.
    Returns a list with all version strings if show_hidden is True or only
    the non-hidden ones otherwise.
    """
    session = DBSession()
    package = Package.by_name(session, package_name)
    # NOTE: show_hidden is currently not honoured; every version is returned.
    return [rel.version for rel in package.sorted_releases]
python
{ "resource": "" }
q14243
package_roles
train
def package_roles(request, package_name):
    """
    Retrieve a list of (role, user) pairs for a given package_name.
    Role is either 'Maintainer' or 'Owner'.
    """
    session = DBSession()
    package = Package.by_name(session, package_name)
    owners = [('Owner', o.name) for o in package.owners]
    maintainers = [('Maintainer', o.name) for o in package.maintainers]
    return owners + maintainers
python
{ "resource": "" }
q14244
release_downloads
train
def release_downloads(request, package_name, version):
    """
    Retrieve a list of files and download count for a given package and
    release version.
    """
    session = DBSession()
    release_files = ReleaseFile.by_release(session, package_name, version)
    if release_files:
        release_files = [(f.release.package.name, f.filename)
                         for f in release_files]
    return release_files
python
{ "resource": "" }
q14245
search
train
def search(request, spec, operator='and'):
    """
    Search the package database using the indicated search spec.

    The spec may include any of the keywords described in the above list
    (except 'stable_version' and 'classifiers'), for example:
    {'description': 'spam'} will search description fields. Within the
    spec, a field's value can be a string or a list of strings (the values
    within the list are combined with an OR), for example:
    {'name': ['foo', 'bar']}.

    Valid keys for the spec dict are listed here. Invalid keys are ignored:

        name
        version
        author
        author_email
        maintainer
        maintainer_email
        home_page
        license
        summary
        description
        keywords
        platform
        download_url

    Arguments for different fields are combined using either "and"
    (the default) or "or". Example:
    search({'name': 'foo', 'description': 'bar'}, 'or').

    The results are returned as a list of dicts
    {'name': package name,
     'version': package release version,
     'summary': package release summary}
    """
    api = pypi.proxy
    rv = []

    # search in proxy
    for k, v in spec.items():
        rv += api.search({k: v}, True)

    # search in local
    session = DBSession()
    release = Release.search(session, spec, operator)
    rv += [{'name': r.package.name,
            'version': r.version,
            'summary': r.summary,
            # hack https://mail.python.org/pipermail/catalog-sig/2012-October/004633.html
            '_pypi_ordering': '',
            } for r in release]
    return rv
python
{ "resource": "" }
q14246
set_proxy
train
def set_proxy(proxy_url, transport_proxy=None):
    """Create the proxy to the PyPI XML-RPC server."""
    global proxy, PYPI_URL
    PYPI_URL = proxy_url
    proxy = xmlrpc.ServerProxy(
        proxy_url,
        transport=RequestsTransport(proxy_url.startswith('https://')),
        allow_none=True)
python
{ "resource": "" }
q14247
authbasic
train
def authbasic(request):
    """
    Basic authentication for pyshop repository (upload) access.
    """
    if len(request.environ.get('HTTP_AUTHORIZATION', '')) > 0:
        auth = request.environ.get('HTTP_AUTHORIZATION')
        scheme, data = auth.split(None, 1)
        assert scheme.lower() == 'basic'
        data = base64.b64decode(data)
        if not isinstance(data, unicode):
            data = data.decode('utf-8')
        username, password = data.split(':', 1)
        if User.by_ldap_credentials(
                DBSession(), username, password, request.registry.settings):
            return HTTPFound(location=request.url)
        if User.by_credentials(DBSession(), username, password):
            return HTTPFound(location=request.url)

    return Response(status=401,
                    headerlist=[(str('WWW-Authenticate'),
                                 str('Basic realm="pyshop repository access"'),
                                 )],
                    )
python
{ "resource": "" }
q14248
decode_b64
train
def decode_b64(data):
    '''Wrapper for b64decode, without having to struggle with bytestrings.'''
    byte_string = data.encode('utf-8')
    decoded = base64.b64decode(byte_string)
    return decoded.decode('utf-8')
python
{ "resource": "" }
q14249
encode_b64
train
def encode_b64(data):
    '''Wrapper for b64encode, without having to struggle with bytestrings.'''
    byte_string = data.encode('utf-8')
    encoded = base64.b64encode(byte_string)
    return encoded.decode('utf-8')
python
{ "resource": "" }
q14250
de_shift_kernel
train
def de_shift_kernel(kernel, shift_x, shift_y, iterations=20):
    """
    De-shifts a shifted kernel to the center of a pixel. This is performed
    iteratively.

    The input kernel is the solution of a linear interpolated shift of a
    sharper kernel centered in the middle of the pixel. To find the
    de-shifted kernel, we perform an iterative correction of proposed
    de-shifted kernels and compare their shifted versions with the input
    kernel.

    :param kernel: (shifted) kernel, e.g. a star in an image that is not
        centered in the pixel grid
    :param shift_x: x-offset relative to the center of the pixel
        (sub-pixel shift)
    :param shift_y: y-offset relative to the center of the pixel
        (sub-pixel shift)
    :return: de-shifted kernel such that the interpolated shift by
        (shift_x, shift_y) results in the input kernel
    """
    nx, ny = np.shape(kernel)
    kernel_new = np.zeros((nx + 2, ny + 2)) + (kernel[0, 0] + kernel[0, -1] + kernel[-1, 0] + kernel[-1, -1]) / 4.
    kernel_new[1:-1, 1:-1] = kernel
    int_shift_x = int(round(shift_x))
    frac_x_shift = shift_x - int_shift_x
    int_shift_y = int(round(shift_y))
    frac_y_shift = shift_y - int_shift_y
    kernel_init = copy.deepcopy(kernel_new)
    kernel_init_shifted = copy.deepcopy(interp.shift(kernel_init, [int_shift_y, int_shift_x], order=1))
    kernel_new = interp.shift(kernel_new, [int_shift_y, int_shift_x], order=1)
    norm = np.sum(kernel_init_shifted)
    for i in range(iterations):
        kernel_shifted_inv = interp.shift(kernel_new, [-frac_y_shift, -frac_x_shift], order=1)
        delta = kernel_init_shifted - kernel_norm(kernel_shifted_inv) * norm
        kernel_new += delta * 1.
        kernel_new = kernel_norm(kernel_new) * norm
    return kernel_new[1:-1, 1:-1]
python
{ "resource": "" }
q14251
center_kernel
train
def center_kernel(kernel, iterations=20):
    """
    Given a kernel that might not be perfectly centered, this routine
    computes its light weighted center and then moves the center in an
    iterative process such that it is centered.

    :param kernel: 2d array (odd numbers)
    :param iterations: int, number of iterations
    :return: centered kernel
    """
    kernel = kernel_norm(kernel)
    nx, ny = np.shape(kernel)
    if nx % 2 == 0:
        raise ValueError("kernel needs odd number of pixels")
    # make coordinate grid of kernel
    x_grid, y_grid = util.make_grid(nx, deltapix=1, left_lower=False)
    # compute 1st moments to get light weighted center
    x_w = np.sum(kernel * util.array2image(x_grid))
    y_w = np.sum(kernel * util.array2image(y_grid))
    # de-shift kernel
    kernel_centered = de_shift_kernel(kernel, shift_x=-x_w, shift_y=-y_w, iterations=iterations)
    return kernel_norm(kernel_centered)
python
{ "resource": "" }
q14252
subgrid_kernel
train
def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100):
    """
    Creates a higher resolution kernel with subgrid resolution as an
    interpolation of the original kernel in an iterative approach.

    :param kernel: initial kernel
    :param subgrid_res: subgrid resolution required
    :return: kernel with higher resolution (larger)
    """
    subgrid_res = int(subgrid_res)
    if subgrid_res == 1:
        return kernel
    nx, ny = np.shape(kernel)
    d_x = 1. / nx
    x_in = np.linspace(d_x / 2, 1 - d_x / 2, nx)
    d_y = 1. / nx
    y_in = np.linspace(d_y / 2, 1 - d_y / 2, ny)
    nx_new = nx * subgrid_res
    ny_new = ny * subgrid_res
    if odd is True:
        if nx_new % 2 == 0:
            nx_new -= 1
        if ny_new % 2 == 0:
            ny_new -= 1

    d_x_new = 1. / nx_new
    d_y_new = 1. / ny_new
    x_out = np.linspace(d_x_new / 2., 1 - d_x_new / 2., nx_new)
    y_out = np.linspace(d_y_new / 2., 1 - d_y_new / 2., ny_new)
    kernel_input = copy.deepcopy(kernel)
    kernel_subgrid = image_util.re_size_array(x_in, y_in, kernel_input, x_out, y_out)
    kernel_subgrid = kernel_norm(kernel_subgrid)
    for i in range(max(num_iter, 1)):
        # given a proposition, re-size it to the original pixel size
        if subgrid_res % 2 == 0:
            kernel_pixel = averaging_even_kernel(kernel_subgrid, subgrid_res)
        else:
            kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
        delta = kernel - kernel_pixel
        temp_kernel = kernel_input + delta
        kernel_subgrid = image_util.re_size_array(x_in, y_in, temp_kernel, x_out, y_out)
        kernel_subgrid = kernel_norm(kernel_subgrid)
        kernel_input = temp_kernel

    # whatever has not been matched is added to zeroth order (in squares of
    # the undersampled PSF)
    if subgrid_res % 2 == 0:
        return kernel_subgrid
    kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
    kernel_pixel = kernel_norm(kernel_pixel)
    delta_kernel = kernel_pixel - kernel_norm(kernel)
    id = np.ones((subgrid_res, subgrid_res))
    delta_kernel_sub = np.kron(delta_kernel, id) / subgrid_res ** 2
    return kernel_norm(kernel_subgrid - delta_kernel_sub)
python
{ "resource": "" }
q14253
pixel_kernel
train
def pixel_kernel(point_source_kernel, subgrid_res=7):
    """
    Converts a pixelised kernel of a point source to a kernel representing
    a uniform extended pixel.

    :param point_source_kernel:
    :param subgrid_res:
    :return: convolution kernel for an extended pixel
    """
    kernel_subgrid = subgrid_kernel(point_source_kernel, subgrid_res, num_iter=10)
    kernel_size = len(point_source_kernel)
    kernel_pixel = np.zeros((kernel_size * subgrid_res, kernel_size * subgrid_res))
    for i in range(subgrid_res):
        k_x = int((kernel_size - 1) / 2 * subgrid_res + i)
        for j in range(subgrid_res):
            k_y = int((kernel_size - 1) / 2 * subgrid_res + j)
            kernel_pixel = image_util.add_layer2image(kernel_pixel, k_x, k_y, kernel_subgrid)
    kernel_pixel = util.averaging(kernel_pixel, numGrid=kernel_size * subgrid_res, numPix=kernel_size)
    return kernel_norm(kernel_pixel)
python
{ "resource": "" }
q14254
split_kernel
train
def split_kernel(kernel, kernel_subgrid, subsampling_size, subgrid_res):
    """
    Pixel kernel and subsampling kernel such that the convolution of both
    applied on an image can be performed, i.e. smaller subsampling PSF and
    hole in larger PSF.

    :param kernel: PSF kernel of the size of the pixel
    :param kernel_subgrid: subsampled kernel
    :param subsampling_size: size of subsampling PSF in units of image pixels
    :return: pixel and subsampling kernel
    """
    n = len(kernel)
    n_sub = len(kernel_subgrid)
    if subsampling_size % 2 == 0:
        subsampling_size += 1
    if subsampling_size > n:
        subsampling_size = n

    kernel_hole = copy.deepcopy(kernel)
    n_min = int((n - 1) / 2 - (subsampling_size - 1) / 2)
    n_max = int((n - 1) / 2 + (subsampling_size - 1) / 2 + 1)
    kernel_hole[n_min:n_max, n_min:n_max] = 0
    n_min_sub = int((n_sub - 1) / 2 - (subsampling_size * subgrid_res - 1) / 2)
    n_max_sub = int((n_sub - 1) / 2 + (subsampling_size * subgrid_res - 1) / 2 + 1)
    kernel_subgrid_cut = kernel_subgrid[n_min_sub:n_max_sub, n_min_sub:n_max_sub]
    flux_subsampled = np.sum(kernel_subgrid_cut)
    flux_hole = np.sum(kernel_hole)
    if flux_hole > 0:
        kernel_hole *= (1. - flux_subsampled) / np.sum(kernel_hole)
    else:
        kernel_subgrid_cut /= np.sum(kernel_subgrid_cut)
    return kernel_hole, kernel_subgrid_cut
python
{ "resource": "" }
q14255
NFW.density
train
def density(self, R, Rs, rho0):
    """
    Three-dimensional NFW profile.

    :param R: radius of interest
    :type R: float/numpy array
    :param Rs: scale radius
    :type Rs: float
    :param rho0: density normalization (characteristic density)
    :type rho0: float
    :return: rho(R) density
    """
    return rho0 / (R / Rs * (1 + R / Rs) ** 2)
python
{ "resource": "" }
q14256
NFW._alpha2rho0
train
def _alpha2rho0(self, theta_Rs, Rs):
    """
    Convert the angle at Rs into rho0.
    """
    rho0 = theta_Rs / (4. * Rs ** 2 * (1. + np.log(1. / 2.)))
    return rho0
python
{ "resource": "" }
q14257
NFW._rho02alpha
train
def _rho02alpha(self, rho0, Rs):
    """
    Convert rho0 to the angle at Rs.

    :param rho0:
    :param Rs:
    :return:
    """
    theta_Rs = rho0 * (4 * Rs ** 2 * (1 + np.log(1. / 2.)))
    return theta_Rs
python
{ "resource": "" }
q14258
LightModel.param_name_list
train
def param_name_list(self):
    """
    Returns the list of all parameter names.

    :return: list of lists of strings (for each light model separately)
    """
    name_list = []
    for func in self.func_list:
        name_list.append(func.param_names)
    return name_list
python
{ "resource": "" }
q14259
LightModel.total_flux
train
def total_flux(self, kwargs_list, norm=False, k=None):
    """
    Computes the total flux of each individual light profile. This allows
    one to estimate the total flux as well as lenstronomy amp to magnitude
    conversions. Not all models are supported.

    :param kwargs_list: list of keyword arguments corresponding to the
        light profiles. The 'amp' parameter can be missing.
    :param norm: bool, if True, computes the flux for amp=1
    :param k: int, if set, only evaluates the specific light model
    :return: list of (total) flux values attributed to each profile
    """
    norm_flux_list = []
    for i, model in enumerate(self.profile_type_list):
        if k is None or k == i:
            if model in ['SERSIC', 'SERSIC_ELLIPSE', 'INTERPOL', 'GAUSSIAN',
                         'GAUSSIAN_ELLIPSE', 'MULTI_GAUSSIAN',
                         'MULTI_GAUSSIAN_ELLIPSE']:
                kwargs_new = kwargs_list[i].copy()
                if norm is True:
                    if model in ['MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE']:
                        new = {'amp': np.array(kwargs_new['amp']) / kwargs_new['amp'][0]}
                    else:
                        new = {'amp': 1}
                    kwargs_new.update(new)
                norm_flux = self.func_list[i].total_flux(**kwargs_new)
                norm_flux_list.append(norm_flux)
            else:
                raise ValueError("profile %s does not support flux normalization." % model)
                # TODO implement total flux for e.g. 'HERNQUIST',
                # 'HERNQUIST_ELLIPSE', 'PJAFFE', 'PJAFFE_ELLIPSE',
                # 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'POWER_LAW', 'NIE',
                # 'CHAMELEON', 'DOUBLE_CHAMELEON', 'UNIFORM'
    return norm_flux_list
python
{ "resource": "" }
q14260
SersicUtil.k_bn
train
def k_bn(self, n, Re):
    """
    Returns the normalisation of the Sersic profile such that Re is the
    half light radius, given the n_sersic slope.
    """
    bn = self.b_n(n)
    k = bn * Re ** (-1. / n)
    return k, bn
python
{ "resource": "" }
q14261
SersicUtil._total_flux
train
def _total_flux(self, r_eff, I_eff, n_sersic):
    """
    Computes the total flux of a Sersic profile.

    :param r_eff: projected half light radius
    :param I_eff: surface brightness at r_eff (in same units as r_eff)
    :param n_sersic: Sersic index
    :return: integrated flux to infinity
    """
    bn = self.b_n(n_sersic)
    return I_eff * r_eff ** 2 * 2 * np.pi * n_sersic * np.exp(bn) / bn ** (2 * n_sersic) * scipy.special.gamma(2 * n_sersic)
python
{ "resource": "" }
q14262
Galkin.sigma2_R
train
def sigma2_R(self, R, kwargs_mass, kwargs_light, kwargs_anisotropy):
    """
    Returns the unweighted LOS velocity dispersion for a specified
    projected radius.

    :param R: 2d projected radius (in angular units)
    :param kwargs_mass: mass model parameters (following lenstronomy lens
        model conventions)
    :param kwargs_light: deflector light parameters (following lenstronomy
        light model conventions)
    :param kwargs_anisotropy: anisotropy parameters, may vary according to
        anisotropy type chosen. We refer to the Anisotropy() class for
        details on the parameters.
    :return:
    """
    I_R_sigma2 = self.I_R_simga2(R, kwargs_mass, kwargs_light, kwargs_anisotropy)
    I_R = self.lightProfile.light_2d(R, kwargs_light)
    return I_R_sigma2 / I_R
python
{ "resource": "" }
q14263
make_grid_with_coordtransform
train
def make_grid_with_coordtransform(numPix, deltapix, subgrid_res=1, left_lower=False, inverse=True):
    """
    Same as the make_grid routine, but also returns the transformation
    matrix and shift between coordinates and pixels.

    :param numPix:
    :param deltapix:
    :param subgrid_res:
    :param left_lower: sets the zero point at the lower left corner of the
        pixels
    :param inverse: bool, if True sets East as left, otherwise East is right
    :return:
    """
    numPix_eff = numPix * subgrid_res
    deltapix_eff = deltapix / float(subgrid_res)
    a = np.arange(numPix_eff)
    matrix = np.dstack(np.meshgrid(a, a)).reshape(-1, 2)
    if inverse is True:
        delta_x = -deltapix_eff
    else:
        delta_x = deltapix_eff
    if left_lower is True:
        x_grid = matrix[:, 0] * deltapix
        y_grid = matrix[:, 1] * deltapix
    else:
        x_grid = (matrix[:, 0] - (numPix_eff - 1) / 2.) * delta_x
        y_grid = (matrix[:, 1] - (numPix_eff - 1) / 2.) * deltapix_eff
    shift = (subgrid_res - 1) / (2. * subgrid_res) * deltapix
    x_grid -= shift
    y_grid -= shift
    ra_at_xy_0 = x_grid[0]
    dec_at_xy_0 = y_grid[0]
    x_at_radec_0 = (numPix_eff - 1) / 2.
    y_at_radec_0 = (numPix_eff - 1) / 2.
    Mpix2coord = np.array([[delta_x, 0], [0, deltapix_eff]])
    Mcoord2pix = np.linalg.inv(Mpix2coord)
    return x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix
python
{ "resource": "" }
q14264
grid_from_coordinate_transform
train
def grid_from_coordinate_transform(numPix, Mpix2coord, ra_at_xy_0, dec_at_xy_0):
    """
    Return a grid in x and y coordinates that satisfies the coordinate
    system.

    :param numPix:
    :param Mpix2coord:
    :param ra_at_xy_0:
    :param dec_at_xy_0:
    :return:
    """
    a = np.arange(numPix)
    matrix = np.dstack(np.meshgrid(a, a)).reshape(-1, 2)
    x_grid = matrix[:, 0]
    y_grid = matrix[:, 1]
    ra_grid = x_grid * Mpix2coord[0, 0] + y_grid * Mpix2coord[0, 1] + ra_at_xy_0
    dec_grid = x_grid * Mpix2coord[1, 0] + y_grid * Mpix2coord[1, 1] + dec_at_xy_0
    return ra_grid, dec_grid
python
{ "resource": "" }
q14265
displaceAbs
train
def displaceAbs(x, y, sourcePos_x, sourcePos_y):
    """
    Calculates a grid of distances to the source position in angle.

    :param x: x-coordinates of the mapped positions
    :param y: y-coordinates of the mapped positions
    :param sourcePos_x: source position x
    :param sourcePos_y: source position y
    :returns: array of displacements
    :raises: AttributeError, KeyError
    """
    x_mapped = x - sourcePos_x
    y_mapped = y - sourcePos_y
    absmapped = np.sqrt(x_mapped ** 2 + y_mapped ** 2)
    return absmapped
python
{ "resource": "" }
q14266
CoreSersic.function
train
def function(self, x, y, amp, R_sersic, Re, n_sersic, gamma, e1, e2,
             center_x=0, center_y=0, alpha=3.):
    """
    Returns the Core-Sersic function.
    """
    phi_G, q = param_util.ellipticity2phi_q(e1, e2)
    Rb = R_sersic
    x_shift = x - center_x
    y_shift = y - center_y
    cos_phi = np.cos(phi_G)
    sin_phi = np.sin(phi_G)
    xt1 = cos_phi * x_shift + sin_phi * y_shift
    xt2 = -sin_phi * x_shift + cos_phi * y_shift
    xt2difq2 = xt2 / (q * q)
    R_ = np.sqrt(xt1 * xt1 + xt2 * xt2difq2)
    if isinstance(R_, int) or isinstance(R_, float):
        R_ = max(self._smoothing, R_)
    else:
        R_[R_ < self._smoothing] = self._smoothing
    if isinstance(R_, int) or isinstance(R_, float):
        R = max(self._smoothing, R_)
    else:
        R = np.empty_like(R_)
        _R = R_[R_ > self._smoothing]  # in the SIS regime
        R[R_ <= self._smoothing] = self._smoothing
        R[R_ > self._smoothing] = _R
    k, bn = self.k_bn(n_sersic, Re)
    result = amp * (1 + (Rb / R) ** alpha) ** (gamma / alpha) \
        * np.exp(-bn * (((R ** alpha + Rb ** alpha) / Re ** alpha) ** (1. / (alpha * n_sersic)) - 1.))
    return np.nan_to_num(result)
python
{ "resource": "" }
q14267
CNFW.density
train
def density(self, R, Rs, rho0, r_core):
    """
    Three-dimensional cored NFW profile.

    :param R: radius of interest
    :type R: float/numpy array
    :param Rs: scale radius
    :type Rs: float
    :param rho0: density normalization (central core density)
    :type rho0: float
    :return: rho(R) density
    """
    M0 = 4 * np.pi * rho0 * Rs ** 3
    return (M0 / 4 / np.pi) * ((r_core + R) * (R + Rs) ** 2) ** -1
python
{ "resource": "" }
q14268
PSF.set_pixel_size
train
def set_pixel_size(self, deltaPix):
    """
    Update pixel size.

    :param deltaPix:
    :return:
    """
    self._pixel_size = deltaPix
    if self.psf_type == 'GAUSSIAN':
        try:
            del self._kernel_point_source
        except AttributeError:
            # cached kernel was never computed; nothing to invalidate
            pass
python
{ "resource": "" }
q14269
PSF.psf_convolution
train
def psf_convolution(self, grid, grid_scale, psf_subgrid=False, subgrid_res=1):
    """
    Convolves a given pixel grid with a PSF.
    """
    psf_type = self.psf_type
    if psf_type == 'NONE':
        return grid
    elif psf_type == 'GAUSSIAN':
        sigma = self._sigma_gaussian / grid_scale
        img_conv = ndimage.filters.gaussian_filter(grid, sigma, mode='nearest', truncate=self._truncation)
        return img_conv
    elif psf_type == 'PIXEL':
        if psf_subgrid:
            kernel = self.subgrid_pixel_kernel(subgrid_res)
        else:
            kernel = self._kernel_pixel
        img_conv1 = signal.fftconvolve(grid, kernel, mode='same')
        return img_conv1
    else:
        raise ValueError('PSF type %s not valid!' % psf_type)
python
{ "resource": "" }
q14270
TimeDelayLikelihood._logL_delays
train
def _logL_delays(self, delays_model, delays_measured, delays_errors):
    """
    Log likelihood of modeled delays vs measured time delays under
    consideration of errors.

    :param delays_model: n delays of the model (not relative delays)
    :param delays_measured: relative delays (1-2, 1-3, 1-4) relative to the
        first in the list
    :param delays_errors: gaussian errors on the measured delays
    :return: log likelihood of data given model
    """
    delta_t_model = np.array(delays_model[1:]) - delays_model[0]
    logL = np.sum(-(delta_t_model - delays_measured) ** 2 / (2 * delays_errors ** 2))
    return logL
python
{ "resource": "" }
q14271
LensModelPlot.plot_main
train
def plot_main(self, with_caustics=False, image_names=False):
    """
    Print the main plots together in a joint frame.

    :return:
    """
    f, axes = plt.subplots(2, 3, figsize=(16, 8))
    self.data_plot(ax=axes[0, 0])
    self.model_plot(ax=axes[0, 1], image_names=True)
    self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)
    self.source_plot(ax=axes[1, 0], deltaPix_source=0.01, numPix=100, with_caustics=with_caustics)
    self.convergence_plot(ax=axes[1, 1], v_max=1)
    self.magnification_plot(ax=axes[1, 2])
    f.tight_layout()
    f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)
    return f, axes
python
{ "resource": "" }
q14272
LensModelPlot.plot_separate
train
def plot_separate(self):
    """
    Plot the different model components separately.

    :return:
    """
    f, axes = plt.subplots(2, 3, figsize=(16, 8))
    self.decomposition_plot(ax=axes[0, 0], text='Lens light', lens_light_add=True, unconvolved=True)
    self.decomposition_plot(ax=axes[1, 0], text='Lens light convolved', lens_light_add=True)
    self.decomposition_plot(ax=axes[0, 1], text='Source light', source_add=True, unconvolved=True)
    self.decomposition_plot(ax=axes[1, 1], text='Source light convolved', source_add=True)
    self.decomposition_plot(ax=axes[0, 2], text='All components', source_add=True, lens_light_add=True, unconvolved=True)
    self.decomposition_plot(ax=axes[1, 2], text='All components convolved', source_add=True, lens_light_add=True, point_source_add=True)
    f.tight_layout()
    f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)
    return f, axes
python
{ "resource": "" }
q14273
LensModelPlot.plot_subtract_from_data_all
train
def plot_subtract_from_data_all(self):
    """
    Subtract model components from data.

    :return:
    """
    f, axes = plt.subplots(2, 3, figsize=(16, 8))
    self.subtract_from_data_plot(ax=axes[0, 0], text='Data')
    self.subtract_from_data_plot(ax=axes[0, 1], text='Data - Point Source', point_source_add=True)
    self.subtract_from_data_plot(ax=axes[0, 2], text='Data - Lens Light', lens_light_add=True)
    self.subtract_from_data_plot(ax=axes[1, 0], text='Data - Source Light', source_add=True)
    self.subtract_from_data_plot(ax=axes[1, 1], text='Data - Source Light - Point Source', source_add=True, point_source_add=True)
    self.subtract_from_data_plot(ax=axes[1, 2], text='Data - Lens Light - Point Source', lens_light_add=True, point_source_add=True)
    f.tight_layout()
    f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)
    return f, axes
python
{ "resource": "" }
q14274
PolarShapelets._chi_lr
train
def _chi_lr(self, r, phi, nl, nr, beta):
    """
    Computes the generalized polar basis function in the convention of
    Massey & Refregier eqn 8.

    :param nl: left basis
    :type nl: int
    :param nr: right basis
    :type nr: int
    :param beta: beta -- the characteristic scale, typically chosen to be
        close to the size of the object
    :type beta: float
    :param coord: coordinates [r, phi]
    :type coord: array(n,2)
    :returns: values at positions of coordinates
    :raises: AttributeError, KeyError
    """
    m = int((nr - nl).real)
    n = int((nr + nl).real)
    p = int((n - abs(m)) / 2)
    p2 = int((n + abs(m)) / 2)
    q = int(abs(m))
    if p % 2 == 0:  # if p is even
        prefac = 1
    else:
        prefac = -1
    prefactor = prefac / beta ** (abs(m) + 1) * np.sqrt(math.factorial(p) / (np.pi * math.factorial(p2)))
    poly = self.poly[p][q]
    return prefactor * r ** q * poly((r / beta) ** 2) * np.exp(-(r / beta) ** 2 / 2) * np.exp(-1j * m * phi)
python
{ "resource": "" }
q14275
TNFW.nfwPot
train
def nfwPot(self, R, Rs, rho0, r_trunc):
    """
    Lensing potential of the truncated NFW profile.

    :param R: radius of interest
    :type R: float/numpy array
    :param Rs: scale radius
    :type Rs: float
    :param rho0: density normalization (characteristic density)
    :type rho0: float
    :return: lensing potential at radius R
    """
    x = R / Rs
    tau = float(r_trunc) / Rs
    hx = self._h(x, tau)
    return 2 * rho0 * Rs ** 3 * hx
python
{ "resource": "" }
q14276
TNFW._g
train
def _g(self, x, tau):
    """
    Analytic solution of the integral for the NFW profile to compute the
    deflection angle and gamma.

    :param x: R/Rs
    :type x: float >0
    """
    return tau ** 2 * (tau ** 2 + 1) ** -2 * (
        (tau ** 2 + 1 + 2 * (x ** 2 - 1)) * self.F(x) + tau * np.pi
        + (tau ** 2 - 1) * np.log(tau)
        + np.sqrt(tau ** 2 + x ** 2) * (-np.pi + self.L(x, tau) * (tau ** 2 - 1) * tau ** -1))
python
{ "resource": "" }
q14277
transform_e1e2
train
def transform_e1e2(x, y, e1, e2, center_x=0, center_y=0):
    """
    Maps the coordinates x, y with eccentricities e1, e2 into a new
    elliptical coordinate system.

    :param x:
    :param y:
    :param e1:
    :param e2:
    :param center_x:
    :param center_y:
    :return:
    """
    x_shift = x - center_x
    y_shift = y - center_y
    x_ = (1 - e1) * x_shift - e2 * y_shift
    y_ = -e2 * x_shift + (1 + e1) * y_shift
    det = np.sqrt((1 - e1) * (1 + e1) + e2 ** 2)
    return x_ / det, y_ / det
python
{ "resource": "" }
q14278
LensProp.time_delays
train
def time_delays(self, kwargs_lens, kwargs_ps, kappa_ext=0):
    """
    Predicts the time delays of the image positions.

    :param kwargs_lens: lens model parameters
    :param kwargs_ps: point source parameters
    :param kappa_ext: external convergence (optional)
    :return: time delays at image positions for the fixed cosmology
    """
    fermat_pot = self.lens_analysis.fermat_potential(kwargs_lens, kwargs_ps)
    time_delay = self.lensCosmo.time_delay_units(fermat_pot, kappa_ext)
    return time_delay
python
{ "resource": "" }
q14279
LensProp.velocity_dispersion
train
def velocity_dispersion(self, kwargs_lens, kwargs_lens_light, lens_light_model_bool_list=None,
                        aniso_param=1, r_eff=None, R_slit=0.81, dR_slit=0.1, psf_fwhm=0.7,
                        num_evaluate=1000):
    """
    Computes the LOS velocity dispersion of the lens within a slit of size
    R_slit x dR_slit and seeing psf_fwhm.

    The assumptions are a Hernquist light profile and the spherical
    power-law lens model at the first position. Further information can be
    found in the AnalyticKinematics() class.

    :param kwargs_lens: lens model parameters
    :param kwargs_lens_light: deflector light parameters
    :param aniso_param: scaled r_ani with respect to the half light radius
    :param r_eff: half light radius; if not provided, will be computed from
        the lens light model
    :param R_slit: width of the slit
    :param dR_slit: length of the slit
    :param psf_fwhm: full width at half maximum of the seeing condition
    :param num_evaluate: number of spectral renderings of the light
        distribution that end up on the slit
    :return: velocity dispersion in units [km/s]
    """
    gamma = kwargs_lens[0]['gamma']
    if 'center_x' in kwargs_lens_light[0]:
        center_x, center_y = kwargs_lens_light[0]['center_x'], kwargs_lens_light[0]['center_y']
    else:
        center_x, center_y = 0, 0
    if r_eff is None:
        r_eff = self.lens_analysis.half_light_radius_lens(
            kwargs_lens_light, center_x=center_x, center_y=center_y,
            model_bool_list=lens_light_model_bool_list)
    theta_E = kwargs_lens[0]['theta_E']
    r_ani = aniso_param * r_eff
    sigma2 = self.analytic_kinematics.vel_disp(gamma, theta_E, r_eff, r_ani, R_slit, dR_slit,
                                               FWHM=psf_fwhm, rendering_number=num_evaluate)
    return sigma2
python
{ "resource": "" }
q14280
Moffat.function
train
def function(self, x, y, amp, alpha, beta, center_x, center_y):
    """
    Returns the Moffat profile.
    """
    x_shift = x - center_x
    y_shift = y - center_y
    return amp * (1. + (x_shift ** 2 + y_shift ** 2) / alpha ** 2) ** (-beta)
python
{ "resource": "" }
q14281
Sampler.mcmc_CH
train
def mcmc_CH(self, walkerRatio, n_run, n_burn, mean_start, sigma_start,
            threadCount=1, init_pos=None, mpi=False):
    """
    Runs the MCMC on the parameter space given parameter bounds with the
    CosmoHammerSampler and returns the chain.
    """
    lowerLimit, upperLimit = self.lower_limit, self.upper_limit
    mean_start = np.maximum(lowerLimit, mean_start)
    mean_start = np.minimum(upperLimit, mean_start)

    low_start = mean_start - sigma_start
    high_start = mean_start + sigma_start
    low_start = np.maximum(lowerLimit, low_start)
    high_start = np.minimum(upperLimit, high_start)
    sigma_start = (high_start - low_start) / 2
    mean_start = (high_start + low_start) / 2
    params = np.array([mean_start, lowerLimit, upperLimit, sigma_start]).T

    chain = LikelihoodComputationChain(
        min=lowerLimit,
        max=upperLimit)

    temp_dir = tempfile.mkdtemp("Hammer")
    file_prefix = os.path.join(temp_dir, "logs")

    chain.addLikelihoodModule(self.chain)
    chain.setup()

    store = InMemoryStorageUtil()
    if mpi is True:
        sampler = MpiCosmoHammerSampler(
            params=params,
            likelihoodComputationChain=chain,
            filePrefix=file_prefix,
            walkersRatio=walkerRatio,
            burninIterations=n_burn,
            sampleIterations=n_run,
            threadCount=1,
            initPositionGenerator=init_pos,
            storageUtil=store)
    else:
        sampler = CosmoHammerSampler(
            params=params,
            likelihoodComputationChain=chain,
            filePrefix=file_prefix,
            walkersRatio=walkerRatio,
            burninIterations=n_burn,
            sampleIterations=n_run,
            threadCount=threadCount,
            initPositionGenerator=init_pos,
            storageUtil=store)

    time_start = time.time()
    if sampler.isMaster():
        print('Computing the MCMC...')
        print('Number of walkers = ', len(mean_start) * walkerRatio)
        print('Burn-in iterations: ', n_burn)
        print('Sampling iterations:', n_run)
    sampler.startSampling()
    if sampler.isMaster():
        time_end = time.time()
        print(time_end - time_start, 'time taken for MCMC sampling')
    try:
        shutil.rmtree(temp_dir)
    except Exception as ex:
        print(ex, 'shutil.rmtree did not work')
    return store.samples, store.prob
python
{ "resource": "" }
q14282
Param.image2source_plane
train
def image2source_plane(self, kwargs_source, kwargs_lens, image_plane=False):
    """
    maps the image-plane position definition of a source model to the source plane

    :param kwargs_source: source model keyword argument list
    :param kwargs_lens: lens model keyword argument list
    :param image_plane: bool, if True, the mapping is skipped and the image-plane definition is kept
    :return: source model keyword argument list with positions mapped to the source plane
    """
    kwargs_source_copy = copy.deepcopy(kwargs_source)
    for i, kwargs in enumerate(kwargs_source_copy):
        if self._image_plane_source_list[i] is True and not image_plane:
            if 'center_x' in kwargs:
                x_mapped, y_mapped = self._image2SourceMapping.image2source(kwargs['center_x'], kwargs['center_y'],
                                                                            kwargs_lens, idex_source=i)
                kwargs['center_x'] = x_mapped
                kwargs['center_y'] = y_mapped
    return kwargs_source_copy
python
{ "resource": "" }
q14283
Param.update_lens_scaling
train
def update_lens_scaling(self, kwargs_cosmo, kwargs_lens, inverse=False):
    """
    multiplies the scaling parameters of the profiles

    :param kwargs_cosmo: cosmography keyword arguments, containing the 'scale_factor' list
    :param kwargs_lens: lens model keyword argument list
    :param inverse: bool, if True, the inverse of the scale factors is applied
    :return: lens model keyword argument list with rescaled deflection parameters
    """
    kwargs_lens_updated = copy.deepcopy(kwargs_lens)
    if self._mass_scaling is False:
        return kwargs_lens_updated
    scale_factor_list = np.array(kwargs_cosmo['scale_factor'])
    if inverse is True:
        scale_factor_list = 1. / np.array(kwargs_cosmo['scale_factor'])
    for i, kwargs in enumerate(kwargs_lens_updated):
        if self._mass_scaling_list[i] is not False:
            scale_factor = scale_factor_list[self._mass_scaling_list[i] - 1]
            if 'theta_E' in kwargs:
                kwargs['theta_E'] *= scale_factor
            elif 'theta_Rs' in kwargs:
                kwargs['theta_Rs'] *= scale_factor
            elif 'sigma0' in kwargs:
                kwargs['sigma0'] *= scale_factor
            elif 'k_eff' in kwargs:
                kwargs['k_eff'] *= scale_factor
    return kwargs_lens_updated
python
{ "resource": "" }
q14284
Param.print_setting
train
def print_setting(self):
    """
    prints the setting of the parameter class

    :return:
    """
    num, param_list = self.num_param()
    num_linear = self.num_param_linear()
    print("The following model options are chosen:")
    print("Lens models:", self._lens_model_list)
    print("Source models:", self._source_light_model_list)
    print("Lens light models:", self._lens_light_model_list)
    print("Point source models:", self._point_source_model_list)
    print("===================")
    print("The following parameters are being fixed:")
    print("Lens:", self.lensParams.kwargs_fixed)
    print("Source:", self.souceParams.kwargs_fixed)
    print("Lens light:", self.lensLightParams.kwargs_fixed)
    print("Point source:", self.pointSourceParams.kwargs_fixed)
    print("===================")
    print("Joint parameters for different models")
    print("Joint lens with lens:", self._joint_lens_with_lens)
    print("Joint lens with lens light:", self._joint_lens_light_with_lens_light)
    print("Joint source with source:", self._joint_source_with_source)
    print("Joint lens with light:", self._joint_lens_with_light)
    print("Joint source with point source:", self._joint_source_with_point_source)
    print("===================")
    print("Number of non-linear parameters being sampled: ", num)
    print("Parameters being sampled: ", param_list)
    print("Number of linear parameters being solved for: ", num_linear)
python
{ "resource": "" }
q14285
MultiExposures._array2image_list
train
def _array2image_list(self, array):
    """
    maps a 1d vector of joint exposures into a list of 2d images of the single exposures

    :param array: 1d numpy array
    :return: list of 2d numpy arrays of the sizes of the individual exposures
    """
    image_list = []
    k = 0
    for i in range(self._num_bands):
        if self._compute_bool[i] is True:
            num_data = self.num_response_list[i]
            array_i = array[k:k + num_data]
            image_i = self._imageModel_list[i].ImageNumerics.array2image(array_i)
            image_list.append(image_i)
            k += num_data
    return image_list
python
{ "resource": "" }
q14286
Solver.check_solver
train
def check_solver(self, image_x, image_y, kwargs_lens):
    """
    returns the precision of the solver in matching the image positions

    :param kwargs_lens: full lens model (including solved parameters)
    :param image_x: point source image positions in x
    :param image_y: point source image positions in y
    :return: Euclidean distances in the source plane between the back-traced positions of the different images
    """
    source_x, source_y = self._lensModel.ray_shooting(image_x, image_y, kwargs_lens)
    dist = np.sqrt((source_x - source_x[0]) ** 2 + (source_y - source_y[0]) ** 2)
    return dist
python
{ "resource": "" }
q14287
LensCosmo.nfw_angle2physical
train
def nfw_angle2physical(self, Rs_angle, theta_Rs):
    """
    converts the angular parameters of an NFW profile into the physical ones

    :param Rs_angle: scale radius in units of arcsec
    :param theta_Rs: observed bending angle at the scale radius in units of arcsec
    :return: rho0, Rs (physical scale radius), c, r200, M200
    """
    Rs = Rs_angle * const.arcsec * self.D_d
    theta_scaled = theta_Rs * self.epsilon_crit * self.D_d * const.arcsec
    rho0 = theta_scaled / (4 * Rs ** 2 * (1 + np.log(1. / 2.)))
    rho0_com = rho0 / self.h**2 * self.a_z(self.z_lens)**3
    c = self.nfw_param.c_rho0(rho0_com)
    r200 = c * Rs
    M200 = self.nfw_param.M_r200(r200 * self.h / self.a_z(self.z_lens)) / self.h
    return rho0, Rs, c, r200, M200
python
{ "resource": "" }
q14288
LensCosmo.nfw_physical2angle
train
def nfw_physical2angle(self, M, c):
    """
    converts the physical mass and concentration parameter of an NFW profile into the lensing quantities

    :param M: mass enclosed within 200 rho_crit in units of M_sun
    :param c: NFW concentration parameter (r200/r_s)
    :return: Rs_angle (angle at the scale radius) and theta_Rs (observed bending angle at the scale radius), both in units of arcsec
    """
    rho0, Rs, r200 = self.nfwParam_physical(M, c)
    Rs_angle = Rs / self.D_d / const.arcsec  # Rs in arcsec
    theta_Rs = rho0 * (4 * Rs ** 2 * (1 + np.log(1. / 2.)))
    return Rs_angle, theta_Rs / self.epsilon_crit / self.D_d / const.arcsec
python
{ "resource": "" }
q14289
sqrt
train
def sqrt(inputArray, scale_min=None, scale_max=None):
    """Performs sqrt scaling of the input numpy array.

    @type inputArray: numpy array
    @param inputArray: image data array
    @type scale_min: float
    @param scale_min: minimum data value
    @type scale_max: float
    @param scale_max: maximum data value
    @rtype: numpy array
    @return: image data array
    """
    imageData = np.array(inputArray, copy=True)

    if scale_min is None:
        scale_min = imageData.min()
    if scale_max is None:
        scale_max = imageData.max()

    imageData = imageData.clip(min=scale_min, max=scale_max)
    imageData = imageData - scale_min
    indices = np.where(imageData < 0)
    imageData[indices] = 0.0
    imageData = np.sqrt(imageData)
    imageData = imageData / math.sqrt(scale_max - scale_min)
    return imageData
python
{ "resource": "" }
q14290
LikelihoodModule.effectiv_num_data_points
train
def effectiv_num_data_points(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps):
    """
    returns the effective number of data points considered in the chi^2 estimation to compute the reduced chi^2 value
    """
    num_linear = 0
    if self._image_likelihood is True:
        num_linear = self.image_likelihood.num_param_linear(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
    num_param, _ = self.param.num_param()
    return self.num_data - num_param - num_linear
python
{ "resource": "" }
q14291
JeansSolver.sigma_r2
train
def sigma_r2(self, r, kwargs_profile, kwargs_anisotropy, kwargs_light):
    """
    solves the radial Jeans equation for the radial velocity dispersion sigma_r^2 at radius r

    :param r: 3d radius
    :param kwargs_profile: mass profile keyword arguments
    :param kwargs_anisotropy: anisotropy keyword arguments
    :param kwargs_light: light profile keyword arguments
    :return: sigma_r^2 at radius r
    """
    if self._mass_profile == 'power_law':
        if self._anisotropy_type == 'r_ani':
            if self._light_profile == 'Hernquist':
                sigma_r = self.power_law_anisotropy(r, kwargs_profile, kwargs_anisotropy, kwargs_light)
            else:
                raise ValueError('light profile %s not supported for Jeans solver' % self._light_profile)
        else:
            raise ValueError('anisotropy type %s not implemented in Jeans equation modelling' % self._anisotropy_type)
    else:
        raise ValueError('mass profile type %s not implemented in Jeans solver' % self._mass_profile)
    return sigma_r
python
{ "resource": "" }
q14292
MCMCSampler.mcmc_emcee
train
def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start):
    """
    runs the emcee MCMC of the parameter space and returns the posterior samples

    :param n_walkers: number of walkers
    :param n_run: number of sampling iterations (after burn-in)
    :param n_burn: number of burn-in iterations
    :param mean_start: mean of the initial walker positions
    :param sigma_start: spread of the initial walker positions
    :return: posterior samples
    """
    sampler = emcee.EnsembleSampler(n_walkers, self.cosmoParam.numParam, self.chain.likelihood)
    p0 = emcee.utils.sample_ball(mean_start, sigma_start, n_walkers)
    new_pos, _, _, _ = sampler.run_mcmc(p0, n_burn)
    sampler.reset()
    store = InMemoryStorageUtil()
    for pos, prob, _, _ in sampler.sample(new_pos, iterations=n_run):
        store.persistSamplingValues(pos, prob, None)
    return store.samples
python
{ "resource": "" }
q14293
data_configure_simple
train
def data_configure_simple(numPix, deltaPix, exposure_time=1, sigma_bkg=1, inverse=False):
    """
    configures the data keyword arguments with a coordinate grid centered at zero.

    :param numPix: number of pixel (numPix x numPix)
    :param deltaPix: pixel size (in angular units)
    :param exposure_time: exposure time
    :param sigma_bkg: background noise (Gaussian sigma)
    :param inverse: if True, coordinate system is ra to the left, if False, to the right
    :return: keyword arguments that can be used to construct a Data() class instance of lenstronomy
    """
    # 1d list of coordinates (x, y) of a numPix x numPix square grid, centered at zero
    x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(
        numPix=numPix, deltapix=deltaPix, subgrid_res=1, inverse=inverse)
    # individual exposure time/weight per pixel
    exposure_map = np.ones((numPix, numPix)) * exposure_time
    kwargs_data = {
        'background_rms': sigma_bkg,
        'exposure_map': exposure_map,
        'ra_at_xy_0': ra_at_xy_0,
        'dec_at_xy_0': dec_at_xy_0,
        'transform_pix2angle': Mpix2coord,
        'image_data': np.zeros((numPix, numPix))
    }
    return kwargs_data
python
{ "resource": "" }
q14294
findOverlap
train
def findOverlap(x_mins, y_mins, min_distance):
    """
    finds solutions which overlap within min_distance of each other and deletes the duplicates,
    keeping one representative per cluster
    """
    n = len(x_mins)
    idex = []
    for i in range(1, n):
        for j in range(i):
            if abs(x_mins[i] - x_mins[j]) < min_distance and abs(y_mins[i] - y_mins[j]) < min_distance:
                idex.append(i)
                break
    x_mins = np.delete(x_mins, idex, axis=0)
    y_mins = np.delete(y_mins, idex, axis=0)
    return x_mins, y_mins
python
{ "resource": "" }
q14295
Data.covariance_matrix
train
def covariance_matrix(self, data, background_rms=1, exposure_map=1, noise_map=None, verbose=False):
    """
    returns a diagonal matrix for the covariance estimation which describes the error

    Notes:

    - the exposure map must be positive definite. Values that deviate too much from the mean exposure time will be
      given a lower limit to not under-predict the Poisson component of the noise.

    - the data must be positive semi-definite for the Poisson noise estimate. Values < 0 (possible after mean
      subtraction) will not have a Poisson component in their noise estimate.

    :param data: data array, e.g. in units of photons/second
    :param background_rms: background noise rms, e.g. in units of photons/second
    :param exposure_map: exposure time per pixel, e.g. in units of seconds
    :param noise_map: 1-sigma noise per pixel; if provided, its square is returned directly
    :param verbose: bool, if True, warns about potentially unstable error estimates
    :return: len(d) x len(d) matrix that gives the error of the background and Poisson components; (photons/second)^2
    """
    if noise_map is not None:
        return noise_map**2
    if isinstance(exposure_map, int) or isinstance(exposure_map, float):
        if exposure_map <= 0:
            exposure_map = 1
    else:
        mean_exp_time = np.mean(exposure_map)
        exposure_map[exposure_map < mean_exp_time / 10] = mean_exp_time / 10
    if verbose:
        if background_rms * np.max(exposure_map) < 1:
            print("WARNING! sigma_b*f %s < 1 count may introduce unstable error estimates" % (
                background_rms * np.max(exposure_map)))
    d_pos = np.zeros_like(data)
    d_pos[data >= 0] = data[data >= 0]  # pixels < 0 get no Poisson component
    sigma = d_pos / exposure_map + background_rms ** 2
    return sigma
python
{ "resource": "" }
q14296
LensEquationSolver.image_position_stochastic
train
def image_position_stochastic(self, source_x, source_y, kwargs_lens, search_window=10,
                              precision_limit=10**(-10), arrival_time_sort=True, x_center=0, y_center=0,
                              num_random=1000, verbose=False):
    """
    Solves the lens equation stochastically with the scipy minimization routine on the quadratic distance between
    the backward ray-shot proposed image position and the source position.
    Credits to Giulia Pagano

    :param source_x: source position
    :param source_y: source position
    :param kwargs_lens: lens model list of keyword arguments
    :param search_window: angular size of search window
    :param precision_limit: limit required on the precision in the source plane
    :param arrival_time_sort: bool, if True sorts according to arrival time
    :param x_center: center of search window
    :param y_center: center of search window
    :param num_random: number of random starting points of the non-linear solver in the search window
    :param verbose: bool, if True, prints performance information
    :return: x_image, y_image
    """
    x_solve, y_solve = [], []
    for i in range(num_random):
        x_init = np.random.uniform(-search_window / 2., search_window / 2) + x_center
        y_init = np.random.uniform(-search_window / 2., search_window / 2) + y_center
        xinitial = np.array([x_init, y_init])
        result = minimize(self._root, xinitial, args=(kwargs_lens, source_x, source_y),
                          tol=precision_limit ** 2, method='Nelder-Mead')
        if self._root(result.x, kwargs_lens, source_x, source_y) < precision_limit**2:
            x_solve.append(result.x[0])
            y_solve.append(result.x[1])

    x_mins, y_mins = image_util.findOverlap(x_solve, y_solve, precision_limit)
    if arrival_time_sort is True:
        x_mins, y_mins = self.sort_arrival_times(x_mins, y_mins, kwargs_lens)
    return x_mins, y_mins
python
{ "resource": "" }
q14297
LensEquationSolver.image_position_from_source
train
def image_position_from_source(self, sourcePos_x, sourcePos_y, kwargs_lens, min_distance=0.1, search_window=10,
                               precision_limit=10**(-10), num_iter_max=100, arrival_time_sort=True,
                               initial_guess_cut=True, verbose=False, x_center=0, y_center=0, num_random=0):
    """
    finds the image positions given the source position and the lens model

    :param sourcePos_x: source position in units of angle
    :param sourcePos_y: source position in units of angle
    :param kwargs_lens: lens model parameters as keyword arguments
    :param min_distance: minimum separation to consider for two images in units of angle
    :param search_window: window size to be considered by the solver. Will not find image positions outside this window
    :param precision_limit: required precision in the lens equation solver (in units of angle in the source plane).
    :param num_iter_max: maximum iteration of lens-source mapping conducted by solver to match the required precision
    :param arrival_time_sort: bool, if True, sorts image positions in arrival time (first arrival photon first listed)
    :param initial_guess_cut: bool, if True, cuts initial local minima selected by the grid search based on distance criteria from the source position
    :param verbose: bool, if True, prints some useful information for the user
    :param x_center: float, center of the window to search for point sources
    :param y_center: float, center of the window to search for point sources
    :param num_random: number of random starting positions appended to the grid candidates
    :returns: (exact) angular position of (multiple) images ra_pos, dec_pos in units of angle
    :raises: AttributeError, KeyError
    """
    # compute number of pixels to cover the search window with the required min_distance
    numPix = int(round(search_window / min_distance) + 0.5)
    x_grid, y_grid = util.make_grid(numPix, min_distance)
    x_grid += x_center
    y_grid += y_center
    # ray-shoot to find the relative distance to the required source position for each grid point
    x_mapped, y_mapped = self.lensModel.ray_shooting(x_grid, y_grid, kwargs_lens)
    absmapped = util.displaceAbs(x_mapped, y_mapped, sourcePos_x, sourcePos_y)
    # select minima in the grid points and select grid points that do not deviate more than the
    # width of the grid point to a solution of the lens equation
    x_mins, y_mins, delta_map = util.neighborSelect(absmapped, x_grid, y_grid)
    if verbose is True:
        print("There are %s regions identified that could contain a solution of the lens equation" % len(x_mins))
    if initial_guess_cut is True:
        mag = np.abs(self.lensModel.magnification(x_mins, y_mins, kwargs_lens))
        mag[mag < 1] = 1
        x_mins = x_mins[delta_map <= min_distance * mag * 5]
        y_mins = y_mins[delta_map <= min_distance * mag * 5]
        if verbose is True:
            print("The number of regions that meet the plausibility criteria are %s" % len(x_mins))
    x_mins = np.append(x_mins, np.random.uniform(low=-search_window / 2 + x_center,
                                                 high=search_window / 2 + x_center, size=num_random))
    y_mins = np.append(y_mins, np.random.uniform(low=-search_window / 2 + y_center,
                                                 high=search_window / 2 + y_center, size=num_random))
    # iterative solving of the lens equation for the selected grid points
    x_mins, y_mins, solver_precision = self._findIterative(x_mins, y_mins, sourcePos_x, sourcePos_y, kwargs_lens,
                                                           precision_limit, num_iter_max, verbose=verbose,
                                                           min_distance=min_distance)
    # only select iterative results that match the precision limit
    x_mins = x_mins[solver_precision <= precision_limit]
    y_mins = y_mins[solver_precision <= precision_limit]
    # find redundant solutions within the min_distance criterion
    x_mins, y_mins = image_util.findOverlap(x_mins, y_mins, min_distance)
    if arrival_time_sort is True:
        x_mins, y_mins = self.sort_arrival_times(x_mins, y_mins, kwargs_lens)
    return x_mins, y_mins
python
{ "resource": "" }
q14298
PointSourceParam.check_positive_flux
train
def check_positive_flux(cls, kwargs_ps):
    """
    check whether inferred linear parameters are positive

    :param kwargs_ps: point source keyword argument list
    :return: bool
    """
    pos_bool = True
    for kwargs in kwargs_ps:
        point_amp = kwargs['point_amp']
        for amp in point_amp:
            if amp < 0:
                pos_bool = False
                break
    return pos_bool
python
{ "resource": "" }
q14299
FittingSequence.best_fit_likelihood
train
def best_fit_likelihood(self):
    """
    returns the log likelihood of the best fit model of the current state of this class

    :return: log likelihood, float
    """
    kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo = self.best_fit(bijective=False)
    param_class = self._param_class
    likelihoodModule = self.likelihoodModule
    logL, _ = likelihoodModule.logL(param_class.kwargs2args(kwargs_lens, kwargs_source, kwargs_lens_light,
                                                            kwargs_ps, kwargs_cosmo))
    return logL
python
{ "resource": "" }