_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q33000
load_model
train
def load_model(file_path):
    """
    Load an ONNX model from disk into a ProtoBuf ``ModelProto`` object.

    :param file_path: full path to the ONNX file
    :return: the parsed ONNX model

    Example:

    ::

        from onnxmltools.utils import load_model
        onnx_model = load_model("SqueezeNet.onnx")
    """
    if not path.exists(file_path):
        raise FileNotFoundError("{0} was not found.".format(file_path))

    model = onnx_proto.ModelProto()
    with open(file_path, 'rb') as stream:
        serialized = stream.read()
    model.ParseFromString(serialized)
    return model
python
{ "resource": "" }
q33001
set_model_domain
train
def set_model_domain(model, domain):
    """
    Set the domain string on an ONNX model.

    :param model: instance of an ONNX model
    :param domain: domain name of the model, e.g. ``"com.acme"``
    :raises ValueError: if the model is not a ``ModelProto`` or the domain
        is not a string

    Example:

    ::

        from onnxmltools.utils import set_model_domain
        onnx_model = load_model("SqueezeNet.onnx")
        set_model_domain(onnx_model, "com.acme")
    """
    # Validate inputs before mutating the model.
    if model is None or not isinstance(model, onnx_proto.ModelProto):
        raise ValueError("Model is not a valid ONNX model.")
    if not convert_utils.is_string_type(domain):
        raise ValueError("Domain must be a string type.")

    model.domain = domain
python
{ "resource": "" }
q33002
set_model_version
train
def set_model_version(model, version):
    """
    Set the version number on an ONNX model.

    :param model: instance of an ONNX model
    :param version: numeric version of the model
    :raises ValueError: if the model is not a ``ModelProto`` or the version
        is not numeric

    Example:

    ::

        from onnxmltools.utils import set_model_version
        onnx_model = load_model("SqueezeNet.onnx")
        set_model_version(onnx_model, 1)
    """
    # Validate inputs before mutating the model.
    if model is None or not isinstance(model, onnx_proto.ModelProto):
        raise ValueError("Model is not a valid ONNX model.")
    if not convert_utils.is_numeric_type(version):
        raise ValueError("Version must be a numeric type.")

    model.model_version = version
python
{ "resource": "" }
q33003
set_model_doc_string
train
def set_model_doc_string(model, doc, override=False):
    """
    Set the doc string on an ONNX model.

    :param model: instance of an ONNX model
    :param doc: string describing the model
    :param override: when True, allow replacing an existing doc string with
        a blank one
    :raises ValueError: if the model or doc is invalid, or when attempting
        to blank an existing doc string without ``override=True``

    Example:

    ::

        from onnxmltools.utils import set_model_doc_string
        onnx_model = load_model("SqueezeNet.onnx")
        set_model_doc_string(onnx_model, "Sample doc string")
    """
    if model is None or not isinstance(model, onnx_proto.ModelProto):
        raise ValueError("Model is not a valid ONNX model.")
    if not convert_utils.is_string_type(doc):
        raise ValueError("Doc must be a string type.")

    # Guard against silently wiping an existing doc string with an empty one.
    if model.doc_string and not doc and override is False:
        raise ValueError("Failing to overwrite the doc string with a blank string, set override to True if intentional.")

    model.doc_string = doc
python
{ "resource": "" }
q33004
ModelComponentContainer.add_initializer
train
def add_initializer(self, name, onnx_type, shape, content):
    '''
    Add a TensorProto into the initializer list of the final ONNX model.

    :param name: Variable name in the produced ONNX model.
    :param onnx_type: Element types allowed in ONNX tensor, e.g., TensorProto.FLOAT and TensorProto.STRING.
    :param shape: Tensor shape, a list of integers.
    :param content: Flattened tensor values (i.e., a float list or a float array).
    :raises ValueError: when the shape contains a None dimension.
    '''
    # Initializers must be fully specified; a None dimension cannot be serialized.
    if any(dim is None for dim in shape):
        raise ValueError('Shape of initializer cannot contain None')

    self.initializers.append(helper.make_tensor(name, onnx_type, shape, content))
python
{ "resource": "" }
q33005
convert_tensor_float_to_float16
train
def convert_tensor_float_to_float16(tensor):
    '''
    Convert a float32 TensorProto to float16 in place.

    :param tensor: TensorProto object
    :return tensor_float16: converted TensorProto object
    :raises ValueError: when the input is not a TensorProto.

    Tensors whose data_type is not FLOAT are returned unchanged.

    Example:

    ::

        from onnxmltools.utils.float16_converter import convert_tensor_float_to_float16
        new_tensor = convert_tensor_float_to_float16(tensor)
    '''
    if not isinstance(tensor, onnx_proto.TensorProto):
        raise ValueError('Expected input type is an ONNX TensorProto but got %s' % type(tensor))

    if tensor.data_type == onnx_proto.TensorProto.FLOAT:
        tensor.data_type = onnx_proto.TensorProto.FLOAT16
        # convert float_data (float type) to float16 and write to int32_data,
        # which is where ONNX stores float16 payloads
        if tensor.float_data:
            int_list = _npfloat16_to_int(np.float16(tensor.float_data))
            tensor.int32_data[:] = int_list
            tensor.float_data[:] = []
        # convert raw_data (bytes type)
        if tensor.raw_data:
            # np.fromstring/ndarray.tostring are deprecated in NumPy;
            # frombuffer/tobytes are the supported equivalents with
            # identical results for a float32 byte buffer.
            float32_list = np.frombuffer(tensor.raw_data, dtype='float32')
            # convert float to float16
            float16_list = np.float16(float32_list)
            # convert float16 to bytes and write back to raw_data
            tensor.raw_data = float16_list.tobytes()
    return tensor
python
{ "resource": "" }
q33006
_validate_metadata
train
def _validate_metadata(metadata_props):
    '''
    Validate metadata properties and possibly show warnings or throw exceptions.

    :param metadata_props: A dictionary of metadata properties,
    with property names and values (see :func:`~onnxmltools.utils.metadata_props.add_metadata_props`
    for examples)
    :raises RuntimeError: when two keys differ only in case.
    '''
    # Keys that collide case-insensitively would overwrite each other downstream.
    if len(CaseInsensitiveDict(metadata_props)) != len(metadata_props):
        raise RuntimeError('Duplicate metadata props found')

    for key, value in metadata_props.items():
        allowed = KNOWN_METADATA_PROPS.get(key)
        if allowed and value.lower() not in allowed:
            warnings.warn('Key {} has invalid value {}. Valid values are {}'.format(key, value, allowed))
python
{ "resource": "" }
q33007
set_denotation
train
def set_denotation(onnx_model, input_name, denotation, target_opset, dimension_denotation=None):
    '''
    Set input type denotation and dimension denotation.

    Type denotation is a feature in ONNX 1.2.1 that lets the model specify the
    content of a tensor (e.g. IMAGE or AUDIO), which a backend can use, for
    example, to transform color space or pixel format for image inputs.

    :param onnx_model: ONNX model object
    :param input_name: Name of input tensor to edit (example: `'data0'`)
    :param denotation: Input type denotation (`documentation
        <https://github.com/onnx/onnx/blob/master/docs/TypeDenotation.md#type-denotation-definition>`_)
        (example: `'IMAGE'`)
    :param target_opset: Target ONNX opset
    :param dimension_denotation: List of dimension type denotations. Must have
        one entry per tensor dimension (`documentation
        https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md#denotation-definition>`_)
        (example: `['DATA_BATCH', 'DATA_CHANNEL', 'DATA_FEATURE', 'DATA_FEATURE']`)
    :raises RuntimeError: when the input is missing or the dimension count mismatches.
    '''
    # Denotation only exists from opset 7 onwards; silently skip older targets.
    if target_opset < 7:
        warnings.warn('Denotation is not supported in targeted opset - %d' % target_opset)
        return

    for graph_input in onnx_model.graph.input:
        if graph_input.name != input_name:
            continue
        graph_input.type.denotation = denotation
        if dimension_denotation:
            dimensions = graph_input.type.tensor_type.shape.dim
            if len(dimension_denotation) != len(dimensions):
                raise RuntimeError(
                    'Wrong number of dimensions: input "{}" has {} dimensions'.format(input_name, len(dimensions)))
            for dimension, channel_denotation in zip(dimensions, dimension_denotation):
                dimension.denotation = channel_denotation
        return onnx_model

    raise RuntimeError('Input "{}" not found'.format(input_name))
python
{ "resource": "" }
q33008
concatenate_variables
train
def concatenate_variables(scope, variables, container):
    '''
    Allocate operators that produce a single float tensor by concatenating all
    input variables. Integer inputs are cast to float before concatenation;
    mixing string and numerical inputs is not supported.
    '''
    # Check whether the requested inputs can be concatenated at all.
    type_set = set(type(variable.type) for variable in variables)
    number_type_set = {FloatType, FloatTensorType, Int64Type, Int64TensorType}
    if StringType in type_set and any(number_type in type_set for number_type in number_type_set):
        raise RuntimeError('We are not able to concatenate numerical tensor(s) and string tensor(s)')

    input_names = []  # names of the variables to concatenate
    input_dims = []   # second-axis dimensions of those variables

    # Collect input variable names, casting integers to float where needed.
    for variable in variables:
        if isinstance(variable.type, (Int64TensorType, Int64Type)):
            input_names.append(convert_integer_to_float(scope, variable, container))
        else:
            input_names.append(variable.full_name)
        # We assume input variables' shape are [1, C_1], ..., [1, C_n] if there are n inputs.
        input_dims.append(variable.type.shape[1])

    # A single input needs no concatenation at all.
    if len(input_names) == 1:
        return input_names[0]

    # Combine all inputs with a FeatureVectorizer node.
    op_type = 'FeatureVectorizer'
    attrs = {'name': scope.get_unique_operator_name(op_type), 'inputdimensions': input_dims}
    concatenated_name = scope.get_unique_variable_name('concatenated')
    container.add_node(op_type, input_names, concatenated_name, op_domain='ai.onnx.ml', **attrs)
    return concatenated_name
python
{ "resource": "" }
q33009
find_type_conversion
train
def find_type_conversion(source_type, target_type):
    """
    Find the operator name for converting source_type into target_type.

    Returns ``'identity'`` when both arguments share the same type, the
    image-to-float converter when the target is a float tensor, and raises
    otherwise.

    :raises ValueError: when no conversion between the types is known.
    """
    if type(source_type) == type(target_type):
        return 'identity'
    if type(target_type) == FloatTensorType:
        return 'imageToFloatTensor'
    raise ValueError('Unsupported type conversion from %s to %s' % (source_type, target_type))
python
{ "resource": "" }
q33010
get_cool_off
train
def get_cool_off() -> Optional[timedelta]:
    """
    Return the login cool off time interpreted from settings.AXES_COOLOFF_TIME.

    The setting may be None, a timedelta, or an integer number of hours; this
    helper normalizes it to either None or a timedelta for internal use.

    :exception TypeError: if settings.AXES_COOLOFF_TIME is of wrong type.
    """
    cool_off = settings.AXES_COOLOFF_TIME
    # Integers are interpreted as a number of hours; anything else passes through.
    return timedelta(hours=cool_off) if isinstance(cool_off, int) else cool_off
python
{ "resource": "" }
q33011
get_credentials
train
def get_credentials(username: str = None, **kwargs) -> dict:
    """
    Calculate credentials for Axes to use internally from given username and kwargs.

    The username is stored under the key configured by
    ``settings.AXES_USERNAME_FORM_FIELD``; any extra kwargs are merged on top
    (and may therefore override that key).
    """
    return {settings.AXES_USERNAME_FORM_FIELD: username, **kwargs}
python
{ "resource": "" }
q33012
get_client_username
train
def get_client_username(request: AxesHttpRequest, credentials: dict = None) -> str:
    """
    Resolve client username from the given request or credentials if supplied.

    The order of preference for fetching the username is as follows:

    1. If configured, use ``AXES_USERNAME_CALLABLE``, and supply ``request, credentials`` as arguments
    2. If given, use ``credentials`` and fetch username from ``AXES_USERNAME_FORM_FIELD`` (defaults to ``username``)
    3. Use request.POST and fetch username from ``AXES_USERNAME_FORM_FIELD`` (defaults to ``username``)

    :param request: incoming Django ``HttpRequest`` or similar object from authentication backend or other source
    :param credentials: incoming credentials ``dict`` or similar object from authentication backend or other source
    """
    if settings.AXES_USERNAME_CALLABLE:
        log.debug('Using settings.AXES_USERNAME_CALLABLE to get username')

        # The setting may be either a callable or a dotted-path string to one.
        if callable(settings.AXES_USERNAME_CALLABLE):
            return settings.AXES_USERNAME_CALLABLE(request, credentials)
        if isinstance(settings.AXES_USERNAME_CALLABLE, str):
            return import_string(settings.AXES_USERNAME_CALLABLE)(request, credentials)
        raise TypeError('settings.AXES_USERNAME_CALLABLE needs to be a string, callable, or None.')

    if credentials:
        log.debug('Using parameter credentials to get username with key settings.AXES_USERNAME_FORM_FIELD')
        return credentials.get(settings.AXES_USERNAME_FORM_FIELD, None)

    log.debug('Using parameter request.POST to get username with key settings.AXES_USERNAME_FORM_FIELD')
    return request.POST.get(settings.AXES_USERNAME_FORM_FIELD, None)
python
{ "resource": "" }
q33013
get_client_ip_address
train
def get_client_ip_address(request: HttpRequest) -> str:
    """
    Get client IP address as configured by the user.

    The django-ipware package is used for address resolution
    and parameters can be configured in the Axes package.
    """
    raw_ip, _ = ipware.ip2.get_client_ip(
        request,
        proxy_order=settings.AXES_PROXY_ORDER,
        proxy_count=settings.AXES_PROXY_COUNT,
        proxy_trusted_ips=settings.AXES_PROXY_TRUSTED_IPS,
        request_header_order=settings.AXES_META_PRECEDENCE_ORDER,
    )

    # Normalize through the stdlib ip_address type so we always return a
    # canonical string representation.
    return str(ip_address(raw_ip))
python
{ "resource": "" }
q33014
get_client_parameters
train
def get_client_parameters(username: str, ip_address: str, user_agent: str) -> dict:
    """
    Get query parameters for filtering AccessAttempt queryset.

    The returned dict has a deterministic key order, so it can be used in
    e.g. the generation of hash keys or other deterministic functions.
    """
    filter_kwargs = {}

    if settings.AXES_ONLY_USER_FAILURES:
        # 1. Only individual usernames can be tracked with parametrization
        filter_kwargs['username'] = username
    elif settings.AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP:
        # 2. A combination of username and IP address can be used as well
        filter_kwargs['username'] = username
        filter_kwargs['ip_address'] = ip_address
    else:
        # 3. Default case is to track the IP address only, which is the most secure option
        filter_kwargs['ip_address'] = ip_address

    if settings.AXES_USE_USER_AGENT:
        # 4. The HTTP User-Agent can be used to track e.g. one browser
        filter_kwargs['user_agent'] = user_agent

    return filter_kwargs
python
{ "resource": "" }
q33015
get_query_str
train
def get_query_str(query: QueryDict, max_length: int = 1024) -> str:
    """
    Turn a query dictionary into an easy-to-read list of key-value pairs.

    If a field is called either ``'password'`` or ``settings.AXES_PASSWORD_FORM_FIELD``
    it will be excluded.

    The length of the output is limited to max_length to avoid a DoS attack
    via excessively large payloads.

    :param query: the request's ``QueryDict`` instance (note: an instance,
        not the class — the previous ``Type[QueryDict]`` annotation was wrong)
    :param max_length: maximum length of the returned string
    """
    query_dict = query.copy()
    # Never echo password fields back into logs.
    query_dict.pop('password', None)
    query_dict.pop(settings.AXES_PASSWORD_FORM_FIELD, None)

    query_str = '\n'.join(
        f'{key}={value}'
        for key, value in query_dict.items()
    )

    return query_str[:max_length]
python
{ "resource": "" }
q33016
is_client_ip_address_whitelisted
train
def is_client_ip_address_whitelisted(request: AxesHttpRequest) -> bool:
    """
    Check if the given request refers to a whitelisted IP.

    The IP is whitelisted when either ``AXES_NEVER_LOCKOUT_WHITELIST`` or
    ``AXES_ONLY_WHITELIST`` is enabled and the request's IP address is in the
    configured whitelist.

    The ``-> bool`` annotation was missing on this predicate; added for
    consistency with the sibling ``is_client_ip_address_blacklisted`` and
    ``is_client_method_whitelisted`` functions.
    """
    if settings.AXES_NEVER_LOCKOUT_WHITELIST and is_ip_address_in_whitelist(request.axes_ip_address):
        return True

    if settings.AXES_ONLY_WHITELIST and is_ip_address_in_whitelist(request.axes_ip_address):
        return True

    return False
python
{ "resource": "" }
q33017
is_client_ip_address_blacklisted
train
def is_client_ip_address_blacklisted(request: AxesHttpRequest) -> bool:
    """
    Check if the given request refers to a blacklisted IP.

    An IP is considered blacklisted either when it appears in the configured
    blacklist, or when ``AXES_ONLY_WHITELIST`` is enabled and the IP is not
    in the whitelist.
    """
    return bool(
        is_ip_address_in_blacklist(request.axes_ip_address)
        or (settings.AXES_ONLY_WHITELIST and not is_ip_address_in_whitelist(request.axes_ip_address))
    )
python
{ "resource": "" }
q33018
is_client_method_whitelisted
train
def is_client_method_whitelisted(request: AxesHttpRequest) -> bool:
    """
    Check if the given request uses a whitelisted method.

    Only GET requests can be whitelisted, and only when
    ``AXES_NEVER_LOCKOUT_GET`` is enabled.
    """
    return bool(settings.AXES_NEVER_LOCKOUT_GET and request.method == 'GET')
python
{ "resource": "" }
q33019
get_client_cache_key
train
def get_client_cache_key(request_or_attempt: Union[HttpRequest, Any], credentials: dict = None) -> str:
    """
    Build cache key name from request or AccessAttempt object.

    :param request_or_attempt: HttpRequest or AccessAttempt object
    :param credentials: credentials containing user information
    :return cache_key: Hash key that is usable for Django cache backends
    """
    # Extract the identifying triple either from the live request or from a
    # previously stored attempt record.
    if isinstance(request_or_attempt, HttpRequest):
        username = get_client_username(request_or_attempt, credentials)
        ip_address = get_client_ip_address(request_or_attempt)
        user_agent = get_client_user_agent(request_or_attempt)
    else:
        username = request_or_attempt.username
        ip_address = request_or_attempt.ip_address
        user_agent = request_or_attempt.user_agent

    filter_kwargs = get_client_parameters(username, ip_address, user_agent)

    # get_client_parameters guarantees deterministic ordering, so the digest
    # is stable for a given client configuration.
    components = ''.join(filter_kwargs.values())
    digest = md5(components.encode()).hexdigest()
    return f'axes-{digest}'
python
{ "resource": "" }
q33020
AxesDatabaseHandler.user_login_failed
train
def user_login_failed(self, sender, credentials: dict, request: AxesHttpRequest = None, **kwargs):  # pylint: disable=too-many-locals
    """
    When user login fails, save AccessAttempt record in database and lock user out if necessary.

    :raises AxesSignalPermissionDenied: if user should be locked out.
    """
    if request is None:
        log.error('AXES: AxesDatabaseHandler.user_login_failed does not function without a request.')
        return

    # 1. database query: Clean up expired user attempts from the database before logging new attempts
    clean_expired_user_attempts(request.axes_attempt_time)

    username = get_client_username(request, credentials)
    client_str = get_client_str(username, request.axes_ip_address, request.axes_user_agent, request.axes_path_info)
    get_data = get_query_str(request.GET)
    post_data = get_query_str(request.POST)

    if self.is_whitelisted(request, credentials):
        log.info('AXES: Login failed from whitelisted client %s.', client_str)
        return

    # 2. database query: Calculate the current maximum failure number from the existing attempts
    failures_since_start = 1 + self.get_failures(request, credentials)

    # 3. database query: Insert or update access records with the new failure data
    if failures_since_start > 1:
        # Update failed attempt information but do not touch the username, IP address, or user agent fields,
        # because attackers can request the site with multiple different configurations
        # in order to bypass the defense mechanisms that are used by the site.
        # NOTE: the log message was previously split by a mis-encoded control
        # character; reconstructed as one line to match the cache handler.
        log.warning(
            'AXES: Repeated login failure by %s. Count = %d of %d. Updating existing record in the database.',
            client_str,
            failures_since_start,
            settings.AXES_FAILURE_LIMIT,
        )

        separator = '\n---------\n'
        attempts = get_user_attempts(request, credentials)
        attempts.update(
            get_data=Concat('get_data', Value(separator + get_data)),
            post_data=Concat('post_data', Value(separator + post_data)),
            http_accept=request.axes_http_accept,
            path_info=request.axes_path_info,
            failures_since_start=failures_since_start,
            attempt_time=request.axes_attempt_time,
        )
    else:
        # Record failed attempt with all the relevant information.
        # Filtering based on username, IP address and user agent handled elsewhere,
        # and this handler just records the available information for further use.
        log.warning(
            'AXES: New login failure by %s. Creating new record in the database.',
            client_str,
        )

        AccessAttempt.objects.create(
            username=username,
            ip_address=request.axes_ip_address,
            user_agent=request.axes_user_agent,
            get_data=get_data,
            post_data=post_data,
            http_accept=request.axes_http_accept,
            path_info=request.axes_path_info,
            failures_since_start=failures_since_start,
            attempt_time=request.axes_attempt_time,
        )

    if failures_since_start >= settings.AXES_FAILURE_LIMIT:
        log.warning('AXES: Locking out %s after repeated login failures.', client_str)

        user_locked_out.send(
            'axes',
            request=request,
            username=username,
            ip_address=request.axes_ip_address,
        )

        raise AxesSignalPermissionDenied('Locked out due to repeated login failures.')
python
{ "resource": "" }
q33021
AxesDatabaseHandler.user_logged_out
train
def user_logged_out(self, sender, request: AxesHttpRequest, user, **kwargs):  # pylint: disable=unused-argument
    """
    When user logs out, update the AccessLog related to the user.
    """
    # 1. database query: Clean up expired user attempts from the database
    clean_expired_user_attempts(request.axes_attempt_time)

    username = user.get_username()
    client_str = get_client_str(username, request.axes_ip_address, request.axes_user_agent, request.axes_path_info)
    log.info('AXES: Successful logout by %s.', client_str)

    if not username or settings.AXES_DISABLE_ACCESS_LOG:
        return

    # 2. database query: Update existing attempt logs with logout time
    AccessLog.objects.filter(
        username=username,
        logout_time__isnull=True,
    ).update(
        logout_time=request.axes_attempt_time,
    )
python
{ "resource": "" }
q33022
AxesCacheHandler.user_login_failed
train
def user_login_failed(self, sender, credentials: dict, request: AxesHttpRequest = None, **kwargs):  # pylint: disable=too-many-locals
    """
    When user login fails, save attempt record in cache and lock user out if necessary.

    :raises AxesSignalPermissionDenied: if user should be locked out.
    """
    if request is None:
        log.error('AXES: AxesCacheHandler.user_login_failed does not function without a request.')
        return

    username = get_client_username(request, credentials)
    client_str = get_client_str(username, request.axes_ip_address, request.axes_user_agent, request.axes_path_info)

    if self.is_whitelisted(request, credentials):
        log.info('AXES: Login failed from whitelisted client %s.', client_str)
        return

    failures_since_start = 1 + self.get_failures(request, credentials)

    if failures_since_start > 1:
        log.warning(
            'AXES: Repeated login failure by %s. Count = %d of %d. Updating existing record in the cache.',
            client_str,
            failures_since_start,
            settings.AXES_FAILURE_LIMIT,
        )
    else:
        log.warning(
            'AXES: New login failure by %s. Creating new record in the cache.',
            client_str,
        )

    # Persist the updated failure count under the client's cache key.
    cache_key = get_client_cache_key(request, credentials)
    self.cache.set(cache_key, failures_since_start, self.cache_timeout)

    if failures_since_start >= settings.AXES_FAILURE_LIMIT:
        log.warning('AXES: Locking out %s after repeated login failures.', client_str)

        user_locked_out.send(
            'axes',
            request=request,
            username=username,
            ip_address=request.axes_ip_address,
        )

        raise AxesSignalPermissionDenied('Locked out due to repeated login failures.')
python
{ "resource": "" }
q33023
AxesMiddleware.update_request
train
def update_request(self, request: HttpRequest):
    """
    Update given Django ``HttpRequest`` with necessary attributes
    before passing it on the ``get_response`` for further
    Django middleware and view processing.
    """
    # Snapshot time first so all derived attributes share one timestamp.
    request.axes_attempt_time = now()
    request.axes_ip_address = get_client_ip_address(request)
    request.axes_user_agent = get_client_user_agent(request)
    request.axes_path_info = get_client_path_info(request)
    request.axes_http_accept = get_client_http_accept(request)
python
{ "resource": "" }
q33024
AxesMiddleware.process_exception
train
def process_exception(self, request: AxesHttpRequest, exception):
    """
    Exception handler that processes exceptions raised by the Axes signal handler when request fails with login.

    Only ``axes.exceptions.AxesSignalPermissionDenied`` exception is handled by this middleware;
    any other exception is ignored by returning ``None`` (Django then continues
    normal exception processing).

    The explicit ``return None`` replaces the previous implicit fall-through,
    which required a ``pylint: disable=inconsistent-return-statements`` marker.
    """
    if isinstance(exception, AxesSignalPermissionDenied):
        return get_lockout_response(request)
    return None
python
{ "resource": "" }
q33025
AxesHandler.is_allowed
train
def is_allowed(self, request: AxesHttpRequest, credentials: dict = None) -> bool:
    """
    Checks if the user is allowed to access or use given functionality such as a login view or authentication.

    This method is abstract and other backends can specialize it as needed, but the default implementation
    checks if the user has attempted to authenticate into the site too many times through the
    Django authentication backends and returns ``False`` if user exceeds the configured Axes thresholds.

    This checker can implement arbitrary checks such as IP whitelisting or blacklisting,
    request frequency checking, failed attempt monitoring or similar functions.

    Please refer to the ``axes.handlers.database.AxesDatabaseHandler`` for the default implementation
    and inspiration on some common checks and access restrictions before writing your own implementation.
    """
    # Blacklist denies first, whitelist grants next, lockout is the fallback check.
    if self.is_blacklisted(request, credentials):
        return False
    if self.is_whitelisted(request, credentials):
        return True
    return not self.is_locked(request, credentials)
python
{ "resource": "" }
q33026
AxesHandler.is_blacklisted
train
def is_blacklisted(self, request: AxesHttpRequest, credentials: dict = None) -> bool:  # pylint: disable=unused-argument
    """
    Checks if the request or given credentials are blacklisted from access.

    The default implementation only consults the client IP blacklist.
    """
    return bool(is_client_ip_address_blacklisted(request))
python
{ "resource": "" }
q33027
AxesHandler.is_whitelisted
train
def is_whitelisted(self, request: AxesHttpRequest, credentials: dict = None) -> bool:  # pylint: disable=unused-argument
    """
    Checks if the request or given credentials are whitelisted for access.

    Either a whitelisted client IP or a whitelisted HTTP method grants access.
    """
    return bool(
        is_client_ip_address_whitelisted(request)
        or is_client_method_whitelisted(request)
    )
python
{ "resource": "" }
q33028
AxesHandler.is_locked
train
def is_locked(self, request: AxesHttpRequest, credentials: dict = None) -> bool:
    """
    Checks if the request or given credentials are locked.

    Lockout checking can be disabled entirely with ``AXES_LOCK_OUT_AT_FAILURE``.
    """
    if not settings.AXES_LOCK_OUT_AT_FAILURE:
        return False
    return self.get_failures(request, credentials) >= settings.AXES_FAILURE_LIMIT
python
{ "resource": "" }
q33029
reset
train
def reset(ip: str = None, username: str = None) -> int:
    """
    Reset records that match IP or username, and return the count of removed attempts.

    This utility method is meant to be used from the CLI or via Python API.
    """
    queryset = AccessAttempt.objects.all()

    # Narrow the queryset by whichever identifiers were supplied.
    if ip:
        queryset = queryset.filter(ip_address=ip)
    if username:
        queryset = queryset.filter(username=username)

    count, _ = queryset.delete()
    log.info('AXES: Reset %s access attempts from database.', count)
    return count
python
{ "resource": "" }
q33030
get_cool_off_threshold
train
def get_cool_off_threshold(attempt_time: datetime = None) -> datetime:
    """
    Get threshold for fetching access attempts from the database.

    :param attempt_time: reference time; current time is used when omitted
    :raises TypeError: when no cool off time is configured
    """
    cool_off = get_cool_off()
    if cool_off is None:
        raise TypeError('Cool off threshold can not be calculated with settings.AXES_COOLOFF_TIME set to None')

    reference = now() if attempt_time is None else attempt_time
    return reference - cool_off
python
{ "resource": "" }
q33031
filter_user_attempts
train
def filter_user_attempts(request: AxesHttpRequest, credentials: dict = None) -> QuerySet:
    """
    Return a queryset of AccessAttempts that match the given request and credentials.
    """
    username = get_client_username(request, credentials)
    params = get_client_parameters(username, request.axes_ip_address, request.axes_user_agent)
    return AccessAttempt.objects.filter(**params)
python
{ "resource": "" }
q33032
get_user_attempts
train
def get_user_attempts(request: AxesHttpRequest, credentials: dict = None) -> QuerySet:
    """
    Get valid user attempts that match the given request and credentials.

    When no cool off time is configured, every matching attempt is returned;
    otherwise only attempts newer than the cool off threshold are included.
    """
    attempts = filter_user_attempts(request, credentials)

    if settings.AXES_COOLOFF_TIME is None:
        log.debug('AXES: Getting all access attempts from database because no AXES_COOLOFF_TIME is configured')
        return attempts

    threshold = get_cool_off_threshold(request.axes_attempt_time)
    log.debug('AXES: Getting access attempts that are newer than %s', threshold)
    return attempts.filter(attempt_time__gte=threshold)
python
{ "resource": "" }
q33033
clean_expired_user_attempts
train
def clean_expired_user_attempts(attempt_time: datetime = None) -> int:
    """
    Clean expired user attempts from the database.

    Returns the number of deleted attempt records; a no-op returning 0 when
    no cool off time is configured.
    """
    if settings.AXES_COOLOFF_TIME is None:
        log.debug('AXES: Skipping clean for expired access attempts because no AXES_COOLOFF_TIME is configured')
        return 0

    threshold = get_cool_off_threshold(attempt_time)
    count, _ = AccessAttempt.objects.filter(attempt_time__lt=threshold).delete()
    log.info('AXES: Cleaned up %s expired access attempts from database that were older than %s', count, threshold)
    return count
python
{ "resource": "" }
q33034
reset_user_attempts
train
def reset_user_attempts(request: AxesHttpRequest, credentials: dict = None) -> int:
    """
    Reset all user attempts that match the given request and credentials.

    Returns the number of deleted attempt records.
    """
    count, _ = filter_user_attempts(request, credentials).delete()
    log.info('AXES: Reset %s access attempts from database.', count)
    return count
python
{ "resource": "" }
q33035
is_user_attempt_whitelisted
train
def is_user_attempt_whitelisted(request: AxesHttpRequest, credentials: dict = None) -> bool:
    """
    Check if the given request or credentials refer to a whitelisted username.

    A whitelisted user has the magic ``nolockout`` property set.

    If the property is unknown or False or the user can not be found,
    this implementation fails gracefully and returns False.

    (The previous docstring incorrectly stated the failure case returns True;
    the code has always returned False when the user is missing or has no
    ``nolockout`` attribute.)
    """
    username_field = getattr(get_user_model(), 'USERNAME_FIELD', 'username')
    username_value = get_client_username(request, credentials)

    kwargs = {username_field: username_value}

    user_model = get_user_model()
    try:
        user = user_model.objects.get(**kwargs)
        return user.nolockout
    except (user_model.DoesNotExist, AttributeError):
        # Missing user or missing nolockout attribute both mean "not whitelisted".
        pass

    return False
python
{ "resource": "" }
q33036
AxesProxyHandler.get_implementation
train
def get_implementation(cls, force: bool = False) -> AxesHandler:
    """
    Fetch and initialize configured handler implementation and memoize it to avoid reinitialization.

    This method is re-entrant and can be called multiple times from e.g. Django application loader.

    :param force: when True, reinitialize even if an implementation is cached
    """
    if not cls.implementation or force:
        cls.implementation = import_string(settings.AXES_HANDLER)()
    return cls.implementation
python
{ "resource": "" }
q33037
AppConfig.initialize
train
def initialize(cls):
    """
    Initialize Axes logging and show version information.

    This method is re-entrant and can be called
    multiple times. It displays version information
    exactly once at application startup.
    """
    # Guard so the banner is emitted only on the first call.
    if cls.logging_initialized:
        return
    cls.logging_initialized = True

    if not settings.AXES_VERBOSE:
        return

    log.info('AXES: BEGIN LOG')
    log.info('AXES: Using django-axes %s', get_version())

    if settings.AXES_ONLY_USER_FAILURES:
        log.info('AXES: blocking by username only.')
    elif settings.AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP:
        log.info('AXES: blocking by combination of username and IP.')
    else:
        log.info('AXES: blocking by IP only.')
python
{ "resource": "" }
q33038
AxesBackend.authenticate
train
def authenticate(self, request: AxesHttpRequest, username: str = None, password: str = None, **kwargs: dict):
    """
    Checks user lockout status and raise a PermissionDenied if user is not allowed to log in.

    This method interrupts the login flow and inserts error message directly to the
    ``response_context`` attribute that is supplied as a keyword argument.

    :keyword response_context: kwarg that will have its ``error`` attribute updated with context.
    :raises AxesBackendRequestParameterRequired: if request parameter is not passed.
    :raises AxesBackendPermissionDenied: if user is already locked out.
    """
    if request is None:
        raise AxesBackendRequestParameterRequired('AxesBackend requires a request as an argument to authenticate')

    credentials = get_credentials(username=username, password=password, **kwargs)

    # Not locked out: let the remaining authentication backends run normally.
    if AxesProxyHandler.is_allowed(request, credentials):
        return

    # Locked out, don't try to authenticate, just update response_context and return.
    # Its a bit weird to pass a context and expect a response value but its nice to get a "why" back.
    error_msg = get_lockout_message()
    response_context = kwargs.get('response_context', {})
    response_context['error'] = error_msg

    # Raise an error that stops the authentication flows at django.contrib.auth.authenticate.
    # This error stops bubbling up at the authenticate call which catches backend PermissionDenied errors.
    # After this error is caught by authenticate it emits a signal indicating user login failed,
    # which is processed by axes.signals.log_user_login_failed which logs the attempt and raises
    # a second exception which bubbles up the middleware stack and produces a HTTP 403 Forbidden reply
    # in the axes.middleware.AxesMiddleware.process_exception middleware exception handler.
    raise AxesBackendPermissionDenied('AxesBackend detected that the given user is locked out')
python
{ "resource": "" }
q33039
DynamicSampler.results
train
def results(self): """Saved results from the dynamic nested sampling run. All saved bounds are also returned.""" # Add all saved samples (and ancillary quantities) to the results. with warnings.catch_warnings(): warnings.simplefilter("ignore") results = [('niter', self.it - 1), ('ncall', np.array(self.saved_nc)), ('eff', self.eff), ('samples', np.array(self.saved_v)), ('samples_id', np.array(self.saved_id)), ('samples_batch', np.array(self.saved_batch, dtype='int')), ('samples_it', np.array(self.saved_it)), ('samples_u', np.array(self.saved_u)), ('samples_n', np.array(self.saved_n)), ('logwt', np.array(self.saved_logwt)), ('logl', np.array(self.saved_logl)), ('logvol', np.array(self.saved_logvol)), ('logz', np.array(self.saved_logz)), ('logzerr', np.sqrt(np.array(self.saved_logzvar))), ('information', np.array(self.saved_h)), ('batch_nlive', np.array(self.saved_batch_nlive, dtype='int')), ('batch_bounds', np.array(self.saved_batch_bounds))] # Add any saved bounds (and ancillary quantities) to the results. if self.sampler.save_bounds: results.append(('bound', copy.deepcopy(self.bound))) results.append(('bound_iter', np.array(self.saved_bounditer, dtype='int'))) results.append(('samples_bound', np.array(self.saved_boundidx, dtype='int'))) results.append(('scale', np.array(self.saved_scale))) return Results(results)
python
{ "resource": "" }
q33040
randsphere
train
def randsphere(n, rstate=None):
    """Draw a point uniformly within an `n`-dimensional unit sphere."""

    rng = np.random if rstate is None else rstate

    # Draw a direction uniformly on the unit (n-1)-sphere by normalizing
    # a standard-normal vector, then scale the radius so the point is
    # uniform in volume (CDF of r is r**n on [0, 1]).
    direction = rng.randn(n)
    direction = direction / lalg.norm(direction)
    radius = rng.rand() ** (1. / n)
    return direction * radius
python
{ "resource": "" }
q33041
bounding_ellipsoid
train
def bounding_ellipsoid(points, pointvol=0.):
    """
    Calculate the bounding ellipsoid containing a collection of points.

    Parameters
    ----------
    points : `~numpy.ndarray` with shape (npoints, ndim)
        A set of coordinates.

    pointvol : float, optional
        The minimum volume occupied by a single point. When provided,
        used to set a minimum bound on the ellipsoid volume as
        `npoints * pointvol`. Default is `0.`.

    Returns
    -------
    ellipsoid : :class:`Ellipsoid`
        The bounding :class:`Ellipsoid` object.

    """
    npoints, ndim = points.shape

    # Check for valid `pointvol` value if provided.
    if pointvol < 0.:
        raise ValueError("You must specify a non-negative value "
                         "for `pointvol`.")

    # If there is only a single point, return an n-sphere with volume
    # `pointvol` centered at the point.
    if npoints == 1:
        if pointvol > 0.:
            ctr = points[0]
            # Radius of an n-sphere with volume `pointvol`.
            r = np.exp((np.log(pointvol) - logvol_prefactor(ndim)) / ndim)
            covar = r**2 * np.identity(ndim)
            return Ellipsoid(ctr, covar)
        else:
            raise ValueError("Cannot compute a bounding ellipsoid to a "
                             "single point if `pointvol` is not specified.")

    # Calculate covariance of points.
    ctr = np.mean(points, axis=0)
    cov = mle_cov(points, rowvar=False)

    # When ndim = 1, `np.cov` returns a 0-d array. Make it a 1x1 2-d array.
    if ndim == 1:
        cov = np.atleast_2d(cov)

    # For a ball of uniformly distributed points, the sample covariance
    # will be smaller than the true covariance by a factor of 1/(n+2)
    # [see, e.g., goo.gl/UbsjYl]. Since we are assuming all points are
    # uniformly distributed within the unit cube, they are uniformly
    # distributed within any sub-volume within the cube. We expand
    # our sample covariance `cov` to compensate for this.
    cov *= (ndim + 2)

    # Define the axes of our ellipsoid. Ensures that `cov` is
    # nonsingular to deal with pathological cases where the ellipsoid has
    # "zero" volume. This can occur when `npoints <= ndim` or when enough
    # points are linear combinations of other points.
    covar = np.array(cov)
    for trials in range(100):
        try:
            # Check if matrix is invertible.
            am = lalg.pinvh(covar)
            l, v = lalg.eigh(covar)  # compute eigenvalues/vectors
            if np.all((l > 0.) & (np.isfinite(l))):
                break
            else:
                raise RuntimeError("The eigenvalue/eigenvector decomposition "
                                   "failed!")
        except:
            # If the matrix remains singular/unstable,
            # suppress the off-diagonal elements (shrink toward identity).
            coeff = 1.1**(trials+1) / 1.1**100
            covar = (1. - coeff) * cov + coeff * np.eye(ndim)
            pass
    else:
        # All 100 regularization attempts failed.
        warnings.warn("Failed to guarantee the ellipsoid axes will be "
                      "non-singular. Defaulting to a sphere.")
        am = np.eye(ndim)

    # Calculate expansion factor necessary to bound each point.
    # Points should obey `(x-v)^T A (x-v) <= 1`, so we calculate this for
    # each point and then scale A up or down to make the
    # "outermost" point obey `(x-v)^T A (x-v) = 1`. This can be done
    # quickly using `einsum` and `tensordot` to iterate over all points.
    delta = points - ctr
    f = np.einsum('...i, ...i', np.tensordot(delta, am, axes=1), delta)
    fmax = np.max(f)

    # Due to round-off errors, we actually scale the ellipsoid so the
    # outermost point obeys `(x-v)^T A (x-v) < 1 - (a bit) < 1`.
    one_minus_a_bit = 1. - SQRTEPS

    if fmax > one_minus_a_bit:
        covar *= fmax / one_minus_a_bit

    # Initialize our ellipsoid.
    ell = Ellipsoid(ctr, covar)

    # Expand our ellipsoid to encompass a minimum volume.
    if pointvol > 0.:
        minvol = npoints * pointvol
        if ell.vol < minvol:
            ell.scale_to_vol(minvol)

    return ell
python
{ "resource": "" }
q33042
_bounding_ellipsoids
train
def _bounding_ellipsoids(points, ell, pointvol=0., vol_dec=0.5,
                         vol_check=2.):
    """
    Internal method used to compute a set of bounding ellipsoids when a
    bounding ellipsoid for the entire set has already been calculated.

    Parameters
    ----------
    points : `~numpy.ndarray` with shape (npoints, ndim)
        A set of coordinates.

    ell : Ellipsoid
        The bounding ellipsoid containing :data:`points`.

    pointvol : float, optional
        Volume represented by a single point. When provided,
        used to set a minimum bound on the ellipsoid volume
        as `npoints * pointvol`. Default is `0.`.

    vol_dec : float, optional
        The required fractional reduction in volume after splitting an
        ellipsoid in order to to accept the split. Default is `0.5`.

    vol_check : float, optional
        The factor used to when checking whether the volume of the
        original bounding ellipsoid is large enough to warrant more
        trial splits via `ell.vol > vol_check * npoints * pointvol`.
        Default is `2.0`.

    Returns
    -------
    ells : list of :class:`Ellipsoid` objects
        List of :class:`Ellipsoid` objects used to bound the
        collection of points. Used to initialize the
        :class:`MultiEllipsoid` object returned in
        :meth:`bounding_ellipsoids`.

    """
    npoints, ndim = points.shape

    # Starting cluster centers are initialized using the major-axis
    # endpoints of the original bounding ellipsoid.
    p1, p2 = ell.major_axis_endpoints()
    start_ctrs = np.vstack((p1, p2))  # shape is (k, ndim) = (2, ndim)

    # Split points into two clusters using k-means clustering with k=2.
    # Any failure while splitting falls through to returning the
    # original single ellipsoid.
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            k2_res = kmeans2(points, k=start_ctrs, iter=10, minit='matrix',
                             check_finite=False)
        labels = k2_res[1]  # cluster identifier ; shape is (npoints,)

        # Get points in each cluster.
        points_k = [points[labels == k, :] for k in (0, 1)]

        # If either cluster has less than ndim+1 points, the bounding ellipsoid
        # will be ill-constrained. Reject the split and simply return the
        # original ellipsoid bounding all the points.
        if points_k[0].shape[0] < 2 * ndim or points_k[1].shape[0] < 2 * ndim:
            return [ell]

        # Bounding ellipsoid for each cluster, possibly enlarged
        # to a minimum volume.
        ells = [bounding_ellipsoid(points_j, pointvol=pointvol)
                for points_j in points_k]

        # If the total volume decreased by a factor of `vol_dec`, we accept
        # the split into subsets. We then recursively split each subset.
        if ells[0].vol + ells[1].vol < vol_dec * ell.vol:
            return (_bounding_ellipsoids(points_k[0], ells[0],
                                         pointvol=pointvol, vol_dec=vol_dec,
                                         vol_check=vol_check) +
                    _bounding_ellipsoids(points_k[1], ells[1],
                                         pointvol=pointvol, vol_dec=vol_dec,
                                         vol_check=vol_check))

        # Otherwise, see if the total ellipsoid volume is larger than the
        # minimum volume by a factor of `vol_check`. If it is, this indicates
        # that there may be more than 2 clusters and we should try to
        # subdivide further.
        if ell.vol > vol_check * npoints * pointvol:
            out = (_bounding_ellipsoids(points_k[0], ells[0],
                                        pointvol=pointvol, vol_dec=vol_dec,
                                        vol_check=vol_check) +
                   _bounding_ellipsoids(points_k[1], ells[1],
                                        pointvol=pointvol, vol_dec=vol_dec,
                                        vol_check=vol_check))

            # Only accept the split if the volume decreased significantly.
            if sum(e.vol for e in out) < vol_dec * ell.vol:
                return out
    except:
        pass

    # Otherwise, we are happy with the single bounding ellipsoid.
    return [ell]
python
{ "resource": "" }
q33043
bounding_ellipsoids
train
def bounding_ellipsoids(points, pointvol=0., vol_dec=0.5, vol_check=2.):
    """
    Bound a collection of points with a union of ellipsoids.

    Computes a single bounding ellipsoid for `points` (enlarged to at
    least `npoints * pointvol` when `pointvol > 0`), then recursively
    splits it while each split reduces the total volume by at least a
    factor of `vol_dec`, using `vol_check` to decide whether further
    splitting is worthwhile.

    Returns
    -------
    mell : :class:`MultiEllipsoid` object
        The bounding union of ellipsoids.
    """
    if not HAVE_KMEANS:
        raise ValueError("scipy.cluster.vq.kmeans2 is required to compute "
                         "ellipsoid decompositions.")  # pragma: no cover

    # Single bounding ellipsoid, then recursive splitting.
    root_ell = bounding_ellipsoid(points, pointvol=pointvol)
    ell_list = _bounding_ellipsoids(points, root_ell, pointvol=pointvol,
                                    vol_dec=vol_dec, vol_check=vol_check)

    return MultiEllipsoid(ells=ell_list)
python
{ "resource": "" }
q33044
_ellipsoid_bootstrap_expand
train
def _ellipsoid_bootstrap_expand(args):
    """Internal method used to compute the expansion factor for a
    bounding ellipsoid based on bootstrapping."""

    points, pointvol = args
    rstate = np.random
    npoints, ndim = points.shape

    # Bootstrap resample: indices drawn with replacement form the
    # "in" set; everything never drawn forms the "out" set.
    draw = rstate.randint(npoints, size=npoints)
    idx_in = np.unique(draw)
    missing_mask = np.ones(npoints, dtype='bool')
    missing_mask[idx_in] = False
    idx_out = np.arange(npoints)[missing_mask]

    # Guarantee at least two held-out points for the distance check.
    if len(idx_out) < 2:
        idx_out = np.append(idx_out, [0, 1])

    points_in = points[idx_in]
    points_out = points[idx_out]

    # Bound the resampled points, then measure how far outside the
    # held-out points fall in normalized (Mahalanobis-like) distance.
    ell = bounding_ellipsoid(points_in, pointvol=pointvol)
    dists = [ell.distance(p) for p in points_out]

    # The expansion factor needed to cover all held-out points (>= 1).
    return max(1., max(dists))
python
{ "resource": "" }
q33045
UnitCube.sample
train
def sample(self, rstate=None):
    """
    Draw a point uniformly at random from the unit cube.

    Returns
    -------
    x : `~numpy.ndarray` with shape (ndim,)
        A coordinate within the unit cube.
    """
    rng = np.random if rstate is None else rstate
    return rng.rand(self.n)
python
{ "resource": "" }
q33046
UnitCube.samples
train
def samples(self, nsamples, rstate=None):
    """
    Draw `nsamples` points uniformly at random from the unit cube.

    Returns
    -------
    x : `~numpy.ndarray` with shape (nsamples, ndim)
        A collection of coordinates within the unit cube.
    """
    if rstate is None:
        rstate = np.random
    # Delegate each draw to `self.sample` so subclasses stay consistent.
    draws = [self.sample(rstate=rstate) for _ in range(nsamples)]
    return np.array(draws)
python
{ "resource": "" }
q33047
Ellipsoid.scale_to_vol
train
def scale_to_vol(self, vol):
    """Rescale the ellipsoid in place so its volume equals `vol`."""

    # Linear scale factor: volume scales as factor**n.
    factor = np.exp((np.log(vol) - np.log(self.vol)) / self.n)

    self.expand *= factor
    self.cov *= factor**2       # covariance scales quadratically
    self.am /= factor**2        # inverse covariance scales inversely
    self.axlens *= factor
    self.axes *= factor
    self.vol = vol
python
{ "resource": "" }
q33048
Ellipsoid.major_axis_endpoints
train
def major_axis_endpoints(self):
    """Return the two endpoints of the ellipsoid's major axis."""
    longest = np.argmax(self.axlens)      # index of the major axis
    half_axis = self.paxes[:, longest]    # center-to-endpoint vector
    return self.ctr - half_axis, self.ctr + half_axis
python
{ "resource": "" }
q33049
Ellipsoid.distance
train
def distance(self, x):
    """Return the normalized (Mahalanobis-style) distance from the
    ellipsoid center to `x`; values <= 1 lie inside the ellipsoid."""
    offset = x - self.ctr
    # d^T A d, computed via two dot products.
    quad_form = np.dot(offset, np.dot(self.am, offset))
    return np.sqrt(quad_form)
python
{ "resource": "" }
q33050
Ellipsoid.randoffset
train
def randoffset(self, rstate=None):
    """Draw a random offset vector from the ellipsoid's center."""
    rng = np.random if rstate is None else rstate
    # Map a uniform draw from the unit n-sphere through the axes matrix.
    unit_draw = randsphere(self.n, rstate=rng)
    return np.dot(self.axes, unit_draw)
python
{ "resource": "" }
q33051
Ellipsoid.sample
train
def sample(self, rstate=None):
    """
    Draw a point uniformly at random from within the ellipsoid.

    Returns
    -------
    x : `~numpy.ndarray` with shape (ndim,)
        A coordinate within the ellipsoid.
    """
    rng = np.random if rstate is None else rstate
    offset = self.randoffset(rstate=rng)
    return self.ctr + offset
python
{ "resource": "" }
q33052
Ellipsoid.unitcube_overlap
train
def unitcube_overlap(self, ndraws=10000, rstate=None):
    """Estimate, via `ndraws` Monte Carlo samples, the fraction of the
    ellipsoid's volume that lies inside the unit cube."""
    rng = np.random if rstate is None else rstate
    draws = [self.sample(rstate=rng) for _ in range(ndraws)]
    inside = sum([unitcheck(pt) for pt in draws])
    return 1. * inside / ndraws
python
{ "resource": "" }
q33053
Ellipsoid.update
train
def update(self, points, pointvol=0., rstate=None, bootstrap=0,
           pool=None, mc_integrate=False):
    """
    Update the ellipsoid to bound the collection of points.

    Parameters
    ----------
    points : `~numpy.ndarray` with shape (npoints, ndim)
        The set of points to bound.

    pointvol : float, optional
        The minimum volume associated with each point. Default is `0.`.

    rstate : `~numpy.random.RandomState`, optional
        `~numpy.random.RandomState` instance.

    bootstrap : int, optional
        The number of bootstrapped realizations of the ellipsoid. The
        maximum distance to the set of points "left out" during each
        iteration is used to enlarge the resulting volumes.
        Default is `0`.

    pool : user-provided pool, optional
        Use this pool of workers to execute operations in parallel.

    mc_integrate : bool, optional
        Whether to use Monte Carlo methods to compute the effective
        overlap of the final ellipsoid with the unit cube.
        Default is `False`.

    """
    if rstate is None:
        rstate = np.random

    # Compute new bounding ellipsoid and copy its geometry over.
    ell = bounding_ellipsoid(points, pointvol=pointvol)
    self.n = ell.n
    self.ctr = ell.ctr
    self.cov = ell.cov
    self.am = ell.am
    self.vol = ell.vol
    self.axlens = ell.axlens
    self.axes = ell.axes
    self.paxes = ell.paxes
    self.expand = ell.expand

    # Use bootstrapping to determine the volume expansion factor.
    if bootstrap > 0:

        # If provided, compute bootstraps in parallel using a pool.
        if pool is None:
            M = map
        else:
            M = pool.map
        ps = [points for it in range(bootstrap)]
        pvs = [pointvol for it in range(bootstrap)]
        args = zip(ps, pvs)
        expands = list(M(_ellipsoid_bootstrap_expand, args))

        # Conservatively set the expansion factor to be the maximum
        # factor derived from our set of bootstraps.
        expand = max(expands)

        # If our ellipsoid is over-constrained, expand it.
        if expand > 1.:
            v = self.vol * expand**self.n
            self.scale_to_vol(v)

    # Estimate the fractional overlap with the unit cube using
    # Monte Carlo integration.
    if mc_integrate:
        self.funit = self.unitcube_overlap()
python
{ "resource": "" }
q33054
MultiEllipsoid.scale_to_vols
train
def scale_to_vols(self, vols):
    """Rescale each ellipsoid in place to its corresponding target volume
    and refresh the cached totals."""
    for ell, target in zip(self.ells, vols):
        ell.scale_to_vol(target)
    self.vols = np.array(vols)
    self.expands = np.array([ell.expand for ell in self.ells])
    new_total = sum(vols)
    # Track the cumulative expansion of the union's total volume.
    self.expand_tot *= new_total / self.vol_tot
    self.vol_tot = new_total
python
{ "resource": "" }
q33055
MultiEllipsoid.update
train
def update(self, points, pointvol=0., vol_dec=0.5, vol_check=2.,
           rstate=None, bootstrap=0, pool=None, mc_integrate=False):
    """
    Update the set of ellipsoids to bound the collection of points.

    Parameters
    ----------
    points : `~numpy.ndarray` with shape (npoints, ndim)
        The set of points to bound.

    pointvol : float, optional
        The minimum volume associated with each point. Default is `0.`.

    vol_dec : float, optional
        The required fractional reduction in volume after splitting
        an ellipsoid in order to to accept the split.
        Default is `0.5`.

    vol_check : float, optional
        The factor used when checking if the volume of the original
        bounding ellipsoid is large enough to warrant `> 2` splits
        via `ell.vol > vol_check * nlive * pointvol`.
        Default is `2.0`.

    rstate : `~numpy.random.RandomState`, optional
        `~numpy.random.RandomState` instance.

    bootstrap : int, optional
        The number of bootstrapped realizations of the ellipsoids. The
        maximum distance to the set of points "left out" during each
        iteration is used to enlarge the resulting volumes.
        Default is `0`.

    pool : user-provided pool, optional
        Use this pool of workers to execute operations in parallel.

    mc_integrate : bool, optional
        Whether to use Monte Carlo methods to compute the effective
        volume and fractional overlap of the final union of ellipsoids
        with the unit cube. Default is `False`.

    """
    if rstate is None:
        rstate = np.random

    if not HAVE_KMEANS:
        raise ValueError("scipy.cluster.vq.kmeans2 is required "
                         "to compute ellipsoid decompositions.")

    npoints, ndim = points.shape

    # Calculate the bounding ellipsoid for the points, possibly
    # enlarged to a minimum volume.
    firstell = bounding_ellipsoid(points, pointvol=pointvol)

    # Recursively split the bounding ellipsoid using `vol_check`
    # until the volume of each split no longer decreases by a
    # factor of `vol_dec`.
    ells = _bounding_ellipsoids(points, firstell, pointvol=pointvol,
                                vol_dec=vol_dec, vol_check=vol_check)

    # Update the set of ellipsoids and their cached aggregate quantities.
    self.nells = len(ells)
    self.ells = ells
    self.ctrs = np.array([ell.ctr for ell in self.ells])
    self.covs = np.array([ell.cov for ell in self.ells])
    self.ams = np.array([ell.am for ell in self.ells])
    self.vols = np.array([ell.vol for ell in self.ells])
    self.vol_tot = sum(self.vols)

    # Compute expansion factor (total volume relative to the
    # un-expanded per-ellipsoid volumes).
    expands = np.array([ell.expand for ell in self.ells])
    vols_orig = self.vols / expands
    vol_tot_orig = sum(vols_orig)
    self.expand_tot = self.vol_tot / vol_tot_orig

    # Use bootstrapping to determine the volume expansion factor.
    if bootstrap > 0:

        # If provided, compute bootstraps in parallel using a pool.
        if pool is None:
            M = map
        else:
            M = pool.map
        ps = [points for it in range(bootstrap)]
        pvs = [pointvol for it in range(bootstrap)]
        vds = [vol_dec for it in range(bootstrap)]
        vcs = [vol_check for it in range(bootstrap)]
        args = zip(ps, pvs, vds, vcs)
        expands = list(M(_ellipsoids_bootstrap_expand, args))

        # Conservatively set the expansion factor to be the maximum
        # factor derived from our set of bootstraps.
        expand = max(expands)

        # If our ellipsoids are overly constrained, expand them.
        if expand > 1.:
            vs = self.vols * expand**ndim
            self.scale_to_vols(vs)

    # Estimate the volume and fractional overlap with the unit cube
    # using Monte Carlo integration.
    if mc_integrate:
        self.vol, self.funit = self.monte_carlo_vol(return_overlap=True)
python
{ "resource": "" }
q33056
RadFriends.scale_to_vol
train
def scale_to_vol(self, vol):
    """Rescale the ball radius in place so the ball volume equals `vol`."""
    factor = (vol / self.vol_ball) ** (1.0 / self.n)  # linear factor
    self.expand *= factor
    self.radius *= factor
    self.vol_ball = vol
python
{ "resource": "" }
q33057
RadFriends.within
train
def within(self, x, ctrs, kdtree=None):
    """Return the indices of the balls (centered on `ctrs`) that contain
    `x`, using a K-D Tree for the search when one is provided."""
    if kdtree is not None:
        # K-D Tree search: all centers within Euclidean `self.radius`.
        return kdtree.query_ball_point(x, self.radius, p=2.0, eps=0)
    # Brute-force: Euclidean distance from every center to `x`.
    dists = lalg.norm(ctrs - x, axis=1)
    return np.where(dists <= self.radius)[0]
python
{ "resource": "" }
q33058
RadFriends.overlap
train
def overlap(self, x, ctrs, kdtree=None):
    """Return the number of balls containing `x`, using a K-D Tree for
    the search when one is provided."""
    return len(self.within(x, ctrs, kdtree=kdtree))
python
{ "resource": "" }
q33059
RadFriends.contains
train
def contains(self, x, ctrs, kdtree=None):
    """Return whether any ball contains `x`, using a K-D Tree for the
    search when one is provided."""
    noverlap = self.overlap(x, ctrs, kdtree=kdtree)
    return noverlap > 0
python
{ "resource": "" }
q33060
RadFriends.update
train
def update(self, points, pointvol=0., rstate=None, bootstrap=0, pool=None, kdtree=None, mc_integrate=False): """ Update the radii of our balls. Parameters ---------- points : `~numpy.ndarray` with shape (npoints, ndim) The set of points to bound. pointvol : float, optional The minimum volume associated with each point. Default is `0.`. rstate : `~numpy.random.RandomState`, optional `~numpy.random.RandomState` instance. bootstrap : int, optional The number of bootstrapped realizations of the ellipsoids. The maximum distance to the set of points "left out" during each iteration is used to enlarge the resulting volumes. Default is `0`. pool : user-provided pool, optional Use this pool of workers to execute operations in parallel. kdtree : `~scipy.spatial.KDTree`, optional K-D Tree used to perform nearest neighbor searches. mc_integrate : bool, optional Whether to use Monte Carlo methods to compute the effective volume and fractional overlap of the final union of balls with the unit cube. Default is `False`. """ # If possible, compute bootstraps in parallel using a pool. if pool is None: M = map else: M = pool.map if bootstrap == 0.: # Construct radius using leave-one-out if no bootstraps used. radii = _friends_leaveoneout_radius(points, 'balls') else: # Bootstrap radius using the set of live points. ps = [points for it in range(bootstrap)] ftypes = ['balls' for it in range(bootstrap)] args = zip(ps, ftypes) radii = list(M(_friends_bootstrap_radius, args)) # Conservatively set radius to be maximum of the set. rmax = max(radii) self.radius = rmax self.vol_ball = vol_prefactor(self.n) * self.radius**self.n self.expand = 1. # Expand our ball to encompass a minimum volume. if pointvol > 0.: v = pointvol if self.vol_ball < v: self.scale_to_vol(v) # Estimate the volume and fractional overlap with the unit cube # using Monte Carlo integration. if mc_integrate: self.vol, self.funit = self.monte_carlo_vol(points, kdtree=kdtree, return_overlap=True)
python
{ "resource": "" }
q33061
SupFriends.scale_to_vol
train
def scale_to_vol(self, vol):
    """Rescale the cube's half-side-length in place so the cube volume
    equals `vol`."""
    factor = (vol / self.vol_cube) ** (1.0 / self.n)  # linear factor
    self.expand *= factor
    self.hside *= factor
    self.vol_cube = vol
python
{ "resource": "" }
q33062
SupFriends.within
train
def within(self, x, ctrs, kdtree=None):
    """Return the indices of the cubes (centered on `ctrs`) that contain
    `x`, using a K-D Tree for the search when one is provided."""
    if kdtree is not None:
        # K-D Tree search with the Chebyshev (max) metric.
        return kdtree.query_ball_point(x, self.hside, p=np.inf, eps=0)
    # Brute-force: Chebyshev distance from every center to `x`.
    cheb_dists = np.max(np.abs(ctrs - x), axis=1)
    return np.where(cheb_dists <= self.hside)[0]
python
{ "resource": "" }
q33063
SupFriends.update
train
def update(self, points, pointvol=0., rstate=None, bootstrap=0,
           pool=None, kdtree=None, mc_integrate=False):
    """
    Update the half-side-lengths of our cubes.

    Parameters
    ----------
    points : `~numpy.ndarray` with shape (npoints, ndim)
        The set of points to bound.

    pointvol : float, optional
        The minimum volume associated with each point. Default is `0.`.

    rstate : `~numpy.random.RandomState`, optional
        `~numpy.random.RandomState` instance.

    bootstrap : int, optional
        The number of bootstrapped realizations of the ellipsoids. The
        maximum distance to the set of points "left out" during each
        iteration is used to enlarge the resulting volumes.
        Default is `0`.

    pool : user-provided pool, optional
        Use this pool of workers to execute operations in parallel.

    kdtree : `~scipy.spatial.KDTree`, optional
        K-D Tree used to perform nearest neighbor searches.

    mc_integrate : bool, optional
        Whether to use Monte Carlo methods to compute the effective
        volume and fractional overlap of the final union of balls
        with the unit cube. Default is `False`.

    """
    if rstate is None:
        rstate = np.random

    # If possible, compute bootstraps in parallel using a pool.
    if pool is None:
        M = map
    else:
        M = pool.map

    if bootstrap == 0.:
        # Construct radius using leave-one-out if no bootstraps used.
        hsides = _friends_leaveoneout_radius(points, 'cubes')
    else:
        # Bootstrap radius using the set of live points.
        ps = [points for it in range(bootstrap)]
        ftypes = ['cubes' for it in range(bootstrap)]
        args = zip(ps, ftypes)
        hsides = list(M(_friends_bootstrap_radius, args))

    # Conservatively set radius to be maximum of the set.
    hsmax = max(hsides)
    self.hside = hsmax
    self.vol_cube = (2. * self.hside)**self.n
    self.expand = 1.

    # Expand our cube to encompass a minimum volume.
    if pointvol > 0.:
        v = pointvol
        if self.vol_cube < v:
            self.scale_to_vol(v)

    # Estimate the volume and fractional overlap with the unit cube
    # using Monte Carlo integration.
    if mc_integrate:
        self.vol, self.funit = self.monte_carlo_vol(points, kdtree=kdtree,
                                                    return_overlap=True)
python
{ "resource": "" }
q33064
sample_unif
train
def sample_unif(args): """ Evaluate a new point sampled uniformly from a bounding proposal distribution. Parameters are zipped within `args` to utilize `pool.map`-style functions. Parameters ---------- u : `~numpy.ndarray` with shape (npdim,) Position of the initial sample. loglstar : float Ln(likelihood) bound. **Not applicable here.** axes : `~numpy.ndarray` with shape (ndim, ndim) Axes used to propose new points. **Not applicable here.** scale : float Value used to scale the provided axes. **Not applicable here.** prior_transform : function Function transforming a sample from the a unit cube to the parameter space of interest according to the prior. loglikelihood : function Function returning ln(likelihood) given parameters as a 1-d `~numpy` array of length `ndim`. kwargs : dict A dictionary of additional method-specific parameters. **Not applicable here.** Returns ------- u : `~numpy.ndarray` with shape (npdim,) Position of the final proposed point within the unit cube. **For uniform sampling this is the same as the initial input position.** v : `~numpy.ndarray` with shape (ndim,) Position of the final proposed point in the target parameter space. logl : float Ln(likelihood) of the final proposed point. nc : int Number of function calls used to generate the sample. For uniform sampling this is `1` by construction. blob : dict Collection of ancillary quantities used to tune :data:`scale`. **Not applicable for uniform sampling.** """ # Unzipping. (u, loglstar, axes, scale, prior_transform, loglikelihood, kwargs) = args # Evaluate. v = prior_transform(np.array(u)) logl = loglikelihood(np.array(v)) nc = 1 blob = None return u, v, logl, nc, blob
python
{ "resource": "" }
q33065
sample_rwalk
train
def sample_rwalk(args):
    """
    Return a new live point proposed by random walking away from an
    existing live point.

    Parameters
    ----------
    u : `~numpy.ndarray` with shape (npdim,)
        Position of the initial sample. **This is a copy of an existing
        live point.**

    loglstar : float
        Ln(likelihood) bound.

    axes : `~numpy.ndarray` with shape (ndim, ndim)
        Axes used to propose new points. For random walks new positions
        are proposed using the :class:`~dynesty.bounding.Ellipsoid` whose
        shape is defined by axes.

    scale : float
        Value used to scale the provided axes.

    prior_transform : function
        Function transforming a sample from the a unit cube to the
        parameter space of interest according to the prior.

    loglikelihood : function
        Function returning ln(likelihood) given parameters as a 1-d
        `~numpy` array of length `ndim`.

    kwargs : dict
        A dictionary of additional method-specific parameters.

    Returns
    -------
    u : `~numpy.ndarray` with shape (npdim,)
        Position of the final proposed point within the unit cube.

    v : `~numpy.ndarray` with shape (ndim,)
        Position of the final proposed point in the target parameter
        space.

    logl : float
        Ln(likelihood) of the final proposed point.

    nc : int
        Number of function calls used to generate the sample.

    blob : dict
        Collection of ancillary quantities used to tune :data:`scale`.

    """

    # Unzipping.
    (u, loglstar, axes, scale,
     prior_transform, loglikelihood, kwargs) = args
    rstate = np.random

    # Periodicity.
    nonperiodic = kwargs.get('nonperiodic', None)

    # Setup.
    n = len(u)
    walks = kwargs.get('walks', 25)  # number of steps
    accept = 0
    reject = 0
    fail = 0
    nfail = 0
    nc = 0
    ncall = 0

    # Seed diagnostics with NaN so the stuck-sampler error message below
    # is informative even before the first proposal is made.
    drhat, dr, du, u_prop, logl_prop = np.nan, np.nan, np.nan, np.nan, np.nan
    # Keep walking until we have taken `walks` steps AND accepted at
    # least one proposal (so `v`/`logl` are guaranteed to be defined).
    while nc < walks or accept == 0:
        while True:

            # Check scale-factor.
            if scale == 0.:
                raise RuntimeError("The random walk sampling is stuck! "
                                   "Some useful output quantities:\n"
                                   "u: {0}\n"
                                   "drhat: {1}\n"
                                   "dr: {2}\n"
                                   "du: {3}\n"
                                   "u_prop: {4}\n"
                                   "loglstar: {5}\n"
                                   "logl_prop: {6}\n"
                                   "axes: {7}\n"
                                   "scale: {8}."
                                   .format(u, drhat, dr, du, u_prop,
                                           loglstar, logl_prop,
                                           axes, scale))

            # Propose a direction on the unit n-sphere.
            drhat = rstate.randn(n)
            drhat /= linalg.norm(drhat)

            # Scale based on dimensionality.
            # r**(1/n) gives a radius uniform in n-ball volume.
            dr = drhat * rstate.rand()**(1./n)

            # Transform to proposal distribution.
            du = np.dot(axes, dr)
            u_prop = u + scale * du

            # Check unit cube constraints.
            if unitcheck(u_prop, nonperiodic):
                break
            else:
                fail += 1
                nfail += 1

            # Check if we're stuck generating bad numbers.
            if fail > 100 * walks:
                warnings.warn("Random number generation appears to be "
                              "extremely inefficient. Adjusting the "
                              "scale-factor accordingly.")
                fail = 0
                scale *= math.exp(-1. / n)

        # Check proposed point.
        v_prop = prior_transform(np.array(u_prop))
        logl_prop = loglikelihood(np.array(v_prop))
        if logl_prop >= loglstar:
            u = u_prop
            v = v_prop
            logl = logl_prop
            accept += 1
        else:
            reject += 1
        nc += 1
        ncall += 1

        # Check if we're stuck generating bad points.
        if nc > 50 * walks:
            scale *= math.exp(-1. / n)
            warnings.warn("Random walk proposals appear to be "
                          "extremely inefficient. Adjusting the "
                          "scale-factor accordingly.")
            nc, accept, reject = 0, 0, 0  # reset values

    blob = {'accept': accept, 'reject': reject, 'fail': nfail,
            'scale': scale}

    return u, v, logl, ncall, blob
python
{ "resource": "" }
q33066
Sampler.results
train
def results(self): """Saved results from the nested sampling run. If bounding distributions were saved, those are also returned.""" # Add all saved samples to the results. if self.save_samples: with warnings.catch_warnings(): warnings.simplefilter("ignore") results = [('nlive', self.nlive), ('niter', self.it - 1), ('ncall', np.array(self.saved_nc)), ('eff', self.eff), ('samples', np.array(self.saved_v)), ('samples_id', np.array(self.saved_id)), ('samples_it', np.array(self.saved_it)), ('samples_u', np.array(self.saved_u)), ('logwt', np.array(self.saved_logwt)), ('logl', np.array(self.saved_logl)), ('logvol', np.array(self.saved_logvol)), ('logz', np.array(self.saved_logz)), ('logzerr', np.sqrt(np.array(self.saved_logzvar))), ('information', np.array(self.saved_h))] else: raise ValueError("You didn't save any samples!") # Add any saved bounds (and ancillary quantities) to the results. if self.save_bounds: results.append(('bound', copy.deepcopy(self.bound))) results.append(('bound_iter', np.array(self.saved_bounditer, dtype='int'))) results.append(('samples_bound', np.array(self.saved_boundidx, dtype='int'))) results.append(('scale', np.array(self.saved_scale))) return Results(results)
python
{ "resource": "" }
q33067
Sampler._beyond_unit_bound
train
def _beyond_unit_bound(self, loglstar): """Check whether we should update our bound beyond the initial unit cube.""" if self.logl_first_update is None: # If we haven't already updated our bounds, check if we satisfy # the provided criteria for establishing the first bounding update. check = (self.ncall > self.ubound_ncall and self.eff < self.ubound_eff) if check: # Save the log-likelihood where our first update took place. self.logl_first_update = loglstar return check else: # If we've already update our bounds, check if we've exceeded the # saved log-likelihood threshold. (This is useful when sampling # within `dynamicsampler`). return loglstar >= self.logl_first_update
python
{ "resource": "" }
q33068
Sampler._empty_queue
train
def _empty_queue(self): """Dump all live point proposals currently on the queue.""" while True: try: # Remove unused points from the queue. self.queue.pop() self.unused += 1 # add to the total number of unused points self.nqueue -= 1 except: # If the queue is empty, we're done! self.nqueue = 0 break
python
{ "resource": "" }
q33069
Sampler._fill_queue
train
def _fill_queue(self, loglstar): """Sequentially add new live point proposals to the queue.""" # Add/zip arguments to submit to the queue. point_queue = [] axes_queue = [] while self.nqueue < self.queue_size: if self._beyond_unit_bound(loglstar): # Propose points using the provided sampling/bounding options. point, axes = self.propose_point() evolve_point = self.evolve_point else: # Propose/evaluate points directly from the unit cube. point = self.rstate.rand(self.npdim) axes = np.identity(self.npdim) evolve_point = sample_unif point_queue.append(point) axes_queue.append(axes) self.nqueue += 1 loglstars = [loglstar for i in range(self.queue_size)] scales = [self.scale for i in range(self.queue_size)] ptforms = [self.prior_transform for i in range(self.queue_size)] logls = [self.loglikelihood for i in range(self.queue_size)] kwargs = [self.kwargs for i in range(self.queue_size)] args = zip(point_queue, loglstars, axes_queue, scales, ptforms, logls, kwargs) if self.use_pool_evolve: # Use the pool to propose ("evolve") a new live point. self.queue = list(self.M(evolve_point, args)) else: # Propose ("evolve") a new live point using the default `map` # function. self.queue = list(map(evolve_point, args))
python
{ "resource": "" }
q33070
Sampler._get_point_value
train
def _get_point_value(self, loglstar): """Grab the first live point proposal in the queue.""" # If the queue is empty, refill it. if self.nqueue <= 0: self._fill_queue(loglstar) # Grab the earliest entry. u, v, logl, nc, blob = self.queue.pop(0) self.used += 1 # add to the total number of used points self.nqueue -= 1 return u, v, logl, nc, blob
python
{ "resource": "" }
q33071
Sampler._new_point
train
def _new_point(self, loglstar, logvol): """Propose points until a new point that satisfies the log-likelihood constraint `loglstar` is found.""" ncall, nupdate = 0, 0 while True: # Get the next point from the queue u, v, logl, nc, blob = self._get_point_value(loglstar) ncall += nc # Bounding checks. ucheck = ncall >= self.update_interval * (1 + nupdate) bcheck = self._beyond_unit_bound(loglstar) # If our queue is empty, update any tuning parameters associated # with our proposal (sampling) method. if blob is not None and self.nqueue <= 0 and bcheck: self.update_proposal(blob) # If we satisfy the log-likelihood constraint, we're done! if logl >= loglstar: break # If there has been more than `update_interval` function calls # made *and* we satisfy the criteria for moving beyond sampling # from the unit cube, update the bound. if ucheck and bcheck: pointvol = math.exp(logvol) / self.nlive bound = self.update(pointvol) if self.save_bounds: self.bound.append(bound) self.nbound += 1 nupdate += 1 self.since_update = -ncall # ncall will be added back later return u, v, logl, ncall
python
{ "resource": "" }
q33072
Sampler._remove_live_points
train
def _remove_live_points(self): """Remove the final set of live points if they were previously added to the current set of dead points.""" if self.added_live: self.added_live = False if self.save_samples: del self.saved_id[-self.nlive:] del self.saved_u[-self.nlive:] del self.saved_v[-self.nlive:] del self.saved_logl[-self.nlive:] del self.saved_logvol[-self.nlive:] del self.saved_logwt[-self.nlive:] del self.saved_logz[-self.nlive:] del self.saved_logzvar[-self.nlive:] del self.saved_h[-self.nlive:] del self.saved_nc[-self.nlive:] del self.saved_boundidx[-self.nlive:] del self.saved_it[-self.nlive:] del self.saved_bounditer[-self.nlive:] del self.saved_scale[-self.nlive:] else: raise ValueError("No live points were added to the " "list of samples!")
python
{ "resource": "" }
q33073
Prior.update
train
def update(self, **kwargs): """Update `params` values using alias. """ for k in self.prior_params: try: self.params[k] = kwargs[self.alias[k]] except(KeyError): pass
python
{ "resource": "" }
q33074
Prior.sample
train
def sample(self, nsample=None, **kwargs): """Draw a sample from the prior distribution. :param nsample: (optional) Unused """ if len(kwargs) > 0: self.update(**kwargs) return self.distribution.rvs(*self.args, size=len(self), loc=self.loc, scale=self.scale)
python
{ "resource": "" }
q33075
Prior.inverse_unit_transform
train
def inverse_unit_transform(self, x, **kwargs): """Go from the parameter value to the unit coordinate using the cdf. """ if len(kwargs) > 0: self.update(**kwargs) return self.distribution.cdf(x, *self.args, loc=self.loc, scale=self.scale)
python
{ "resource": "" }
q33076
UnitCubeSampler.update_slice
train
def update_slice(self, blob): """Update the slice proposal scale based on the relative size of the slices compared to our initial guess.""" nexpand, ncontract = blob['nexpand'], blob['ncontract'] self.scale *= nexpand / (2. * ncontract)
python
{ "resource": "" }
q33077
UnitCubeSampler.update_hslice
train
def update_hslice(self, blob): """Update the Hamiltonian slice proposal scale based on the relative amount of time spent moving vs reflecting.""" nmove, nreflect = blob['nmove'], blob['nreflect'] ncontract = blob.get('ncontract', 0) fmove = (1. * nmove) / (nmove + nreflect + ncontract + 2) norm = max(self.fmove, 1. - self.fmove) self.scale *= math.exp((fmove - self.fmove) / norm)
python
{ "resource": "" }
q33078
SingleEllipsoidSampler.update
train
def update(self, pointvol): """Update the bounding ellipsoid using the current set of live points.""" # Check if we should use the provided pool for updating. if self.use_pool_update: pool = self.pool else: pool = None # Update the ellipsoid. self.ell.update(self.live_u, pointvol=pointvol, rstate=self.rstate, bootstrap=self.bootstrap, pool=pool) if self.enlarge != 1.: self.ell.scale_to_vol(self.ell.vol * self.enlarge) return copy.deepcopy(self.ell)
python
{ "resource": "" }
q33079
MultiEllipsoidSampler.update
train
def update(self, pointvol): """Update the bounding ellipsoids using the current set of live points.""" # Check if we should use the pool for updating. if self.use_pool_update: pool = self.pool else: pool = None # Update the bounding ellipsoids. self.mell.update(self.live_u, pointvol=pointvol, vol_dec=self.vol_dec, vol_check=self.vol_check, rstate=self.rstate, bootstrap=self.bootstrap, pool=pool) if self.enlarge != 1.: self.mell.scale_to_vols(self.mell.vols * self.enlarge) return copy.deepcopy(self.mell)
python
{ "resource": "" }
q33080
RadFriendsSampler.update
train
def update(self, pointvol): """Update the N-sphere radii using the current set of live points.""" # Initialize a K-D Tree to assist nearest neighbor searches. if self.use_kdtree: kdtree = spatial.KDTree(self.live_u) else: kdtree = None # Check if we should use the provided pool for updating. if self.use_pool_update: pool = self.pool else: pool = None # Update the N-spheres. self.radfriends.update(self.live_u, pointvol=pointvol, rstate=self.rstate, bootstrap=self.bootstrap, pool=pool, kdtree=kdtree) if self.enlarge != 1.: self.radfriends.scale_to_vol(self.radfriends.vol_ball * self.enlarge) return copy.deepcopy(self.radfriends)
python
{ "resource": "" }
q33081
Results.summary
train
def summary(self): """Return a formatted string giving a quick summary of the results.""" res = ("nlive: {:d}\n" "niter: {:d}\n" "ncall: {:d}\n" "eff(%): {:6.3f}\n" "logz: {:6.3f} +/- {:6.3f}" .format(self.nlive, self.niter, sum(self.ncall), self.eff, self.logz[-1], self.logzerr[-1])) print('Summary\n=======\n'+res)
python
{ "resource": "" }
q33082
unitcheck
train
def unitcheck(u, nonperiodic=None): """Check whether `u` is inside the unit cube. Given a masked array `nonperiodic`, also allows periodic boundaries conditions to exceed the unit cube.""" if nonperiodic is None: # No periodic boundary conditions provided. return np.all(u > 0.) and np.all(u < 1.) else: # Alternating periodic and non-periodic boundary conditions. return (np.all(u[nonperiodic] > 0.) and np.all(u[nonperiodic] < 1.) and np.all(u[~nonperiodic] > -0.5) and np.all(u[~nonperiodic] < 1.5))
python
{ "resource": "" }
q33083
mean_and_cov
train
def mean_and_cov(samples, weights): """ Compute the weighted mean and covariance of the samples. Parameters ---------- samples : `~numpy.ndarray` with shape (nsamples, ndim) 2-D array containing data samples. This ordering is equivalent to using `rowvar=False` in `~numpy.cov`. weights : `~numpy.ndarray` with shape (nsamples,) 1-D array of sample weights. Returns ------- mean : `~numpy.ndarray` with shape (ndim,) Weighted sample mean vector. cov : `~numpy.ndarray` with shape (ndim, ndim) Weighted sample covariance matrix. Notes ----- Implements the formulae found `here <https://goo.gl/emWFLR>`_. """ # Compute the weighted mean. mean = np.average(samples, weights=weights, axis=0) # Compute the weighted covariance. dx = samples - mean wsum = np.sum(weights) w2sum = np.sum(weights**2) cov = wsum / (wsum**2 - w2sum) * np.einsum('i,ij,ik', weights, dx, dx) return mean, cov
python
{ "resource": "" }
q33084
resample_equal
train
def resample_equal(samples, weights, rstate=None): """ Resample a new set of points from the weighted set of inputs such that they all have equal weight. Each input sample appears in the output array either `floor(weights[i] * nsamples)` or `ceil(weights[i] * nsamples)` times, with `floor` or `ceil` randomly selected (weighted by proximity). Parameters ---------- samples : `~numpy.ndarray` with shape (nsamples,) Set of unequally weighted samples. weights : `~numpy.ndarray` with shape (nsamples,) Corresponding weight of each sample. rstate : `~numpy.random.RandomState`, optional `~numpy.random.RandomState` instance. Returns ------- equal_weight_samples : `~numpy.ndarray` with shape (nsamples,) New set of samples with equal weights. Examples -------- >>> x = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) >>> w = np.array([0.6, 0.2, 0.15, 0.05]) >>> utils.resample_equal(x, w) array([[ 1., 1.], [ 1., 1.], [ 1., 1.], [ 3., 3.]]) Notes ----- Implements the systematic resampling method described in `Hol, Schon, and Gustafsson (2006) <doi:10.1109/NSSPW.2006.4378824>`_. """ if rstate is None: rstate = np.random if abs(np.sum(weights) - 1.) > SQRTEPS: # same tol as in np.random.choice. raise ValueError("Weights do not sum to 1.") # Make N subdivisions and choose positions with a consistent random offset. nsamples = len(weights) positions = (rstate.random() + np.arange(nsamples)) / nsamples # Resample the data. idx = np.zeros(nsamples, dtype=np.int) cumulative_sum = np.cumsum(weights) i, j = 0, 0 while i < nsamples: if positions[i] < cumulative_sum[j]: idx[i] = j i += 1 else: j += 1 return samples[idx]
python
{ "resource": "" }
q33085
_get_nsamps_samples_n
train
def _get_nsamps_samples_n(res): """ Helper function for calculating the number of samples Parameters ---------- res : :class:`~dynesty.results.Results` instance The :class:`~dynesty.results.Results` instance taken from a previous nested sampling run. Returns ------- nsamps: int The total number of samples samples_n: array Number of live points at a given iteration """ try: # Check if the number of live points explicitly changes. samples_n = res.samples_n nsamps = len(samples_n) except: # If the number of live points is constant, compute `samples_n`. niter = res.niter nlive = res.nlive nsamps = len(res.logvol) if nsamps == niter: samples_n = np.ones(niter, dtype='int') * nlive elif nsamps == (niter + nlive): samples_n = np.append(np.ones(niter, dtype='int') * nlive, np.arange(1, nlive + 1)[::-1]) else: raise ValueError("Final number of samples differs from number of " "iterations and number of live points.") return nsamps, samples_n
python
{ "resource": "" }
q33086
reweight_run
train
def reweight_run(res, logp_new, logp_old=None):
    """
    Reweight a given run based on a new target distribution.

    Parameters
    ----------
    res : :class:`~dynesty.results.Results` instance
        The :class:`~dynesty.results.Results` instance taken from a
        previous nested sampling run.

    logp_new : `~numpy.ndarray` with shape (nsamps,)
        New target distribution evaluated at the location of the samples.

    logp_old : `~numpy.ndarray` with shape (nsamps,)
        Old target distribution evaluated at the location of the samples.
        If not provided, the `logl` values from `res` will be used.

    Returns
    -------
    new_res : :class:`~dynesty.results.Results` instance
        A new :class:`~dynesty.results.Results` instance with
        corresponding weights based on our reweighted samples.

    """

    # Extract info.
    if logp_old is None:
        logp_old = res['logl']
    logrwt = logp_new - logp_old  # ln(reweight)
    logvol = res['logvol']
    logl = res['logl']
    nsamps = len(logvol)

    # Compute weights using quadratic estimator.
    # Running accumulators: information (h), evidence (logz), previous
    # likelihood bound (loglstar), and evidence variance (logzvar).
    h = 0.
    logz = -1.e300
    loglstar = -1.e300
    logzvar = 0.
    logvols_pad = np.concatenate(([0.], logvol))
    # ln of the (positive) shell volume between consecutive iterations,
    # computed stably as a signed log-sum-exp.
    logdvols = misc.logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
                              axis=1, b=np.c_[np.ones(nsamps),
                                              -np.ones(nsamps)])
    logdvols += math.log(0.5)
    dlvs = -np.diff(np.append(0., logvol))
    saved_logwt, saved_logz, saved_logzvar, saved_h = [], [], [], []
    for i in range(nsamps):
        loglstar_new = logl[i]
        logdvol, dlv = logdvols[i], dlvs[i]
        # Trapezoid-rule weight, shifted by the reweighting factor.
        logwt = np.logaddexp(loglstar_new, loglstar) + logdvol + logrwt[i]
        logz_new = np.logaddexp(logz, logwt)
        try:
            lzterm = (math.exp(loglstar - logz_new) * loglstar +
                      math.exp(loglstar_new - logz_new) * loglstar_new)
        except:
            # Guard against overflow/underflow in the exponentials.
            lzterm = 0.
        h_new = (math.exp(logdvol) * lzterm +
                 math.exp(logz - logz_new) * (h + logz) -
                 logz_new)
        dh = h_new - h
        h = h_new
        logz = logz_new
        logzvar += dh * dlv
        loglstar = loglstar_new
        saved_logwt.append(logwt)
        saved_logz.append(logz)
        saved_logzvar.append(logzvar)
        saved_h.append(h)

    # Copy results.
    new_res = Results([item for item in res.items()])

    # Overwrite items with our new estimates.
    new_res.logwt = np.array(saved_logwt)
    new_res.logz = np.array(saved_logz)
    new_res.logzerr = np.sqrt(np.array(saved_logzvar))
    new_res.h = np.array(saved_h)

    return new_res
python
{ "resource": "" }
q33087
enum
train
def enum(enum_type='enum', base_classes=None, methods=None, **attrs): """ Generates a enumeration with the given attributes. """ # Enumerations can not be initalized as a new instance def __init__(instance, *args, **kwargs): raise RuntimeError('%s types can not be initialized.' % enum_type) if base_classes is None: base_classes = () if methods is None: methods = {} base_classes = base_classes + (object,) for k, v in methods.items(): methods[k] = classmethod(v) attrs['enums'] = attrs.copy() methods.update(attrs) methods['__init__'] = __init__ return type(to_string(enum_type), base_classes, methods)
python
{ "resource": "" }
q33088
SendmailEmailAdapter.send_email_message
train
def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name): """ Send email message via Flask-Sendmail. Args: recipient: Email address or tuple of (Name, Email-address). subject: Subject line. html_message: The message body in HTML. text_message: The message body in plain text. """ if not current_app.testing: # pragma: no cover # Prepare email message from flask_sendmail import Message message = Message( subject, recipients=[recipient], html=html_message, body=text_message) # Send email message self.mail.send(message)
python
{ "resource": "" }
q33089
DBManager.add_user_role
train
def add_user_role(self, user, role_name): """Associate a role name with a user.""" # For SQL: user.roles is list of pointers to Role objects if isinstance(self.db_adapter, SQLDbAdapter): # user.roles is a list of Role IDs # Get or add role role = self.db_adapter.find_first_object(self.RoleClass, name=role_name) if not role: role = self.RoleClass(name=role_name) self.db_adapter.add_object(role) user.roles.append(role) # For others: user.roles is a list of role names else: # user.roles is a list of role names user.roles.append(role_name)
python
{ "resource": "" }
q33090
DBManager.find_user_by_username
train
def find_user_by_username(self, username): """Find a User object by username.""" return self.db_adapter.ifind_first_object(self.UserClass, username=username)
python
{ "resource": "" }
q33091
DBManager.find_user_emails
train
def find_user_emails(self, user): """Find all the UserEmail object belonging to a user.""" user_emails = self.db_adapter.find_objects(self.UserEmailClass, user_id=user.id) return user_emails
python
{ "resource": "" }
q33092
DBManager.get_user_and_user_email_by_id
train
def get_user_and_user_email_by_id(self, user_or_user_email_id): """Retrieve the User and UserEmail object by ID.""" if self.UserEmailClass: user_email = self.db_adapter.get_object(self.UserEmailClass, user_or_user_email_id) user = user_email.user if user_email else None else: user = self.db_adapter.get_object(self.UserClass, user_or_user_email_id) user_email = user return (user, user_email)
python
{ "resource": "" }
q33093
DBManager.get_user_and_user_email_by_email
train
def get_user_and_user_email_by_email(self, email): """Retrieve the User and UserEmail object by email address.""" if self.UserEmailClass: user_email = self.db_adapter.ifind_first_object(self.UserEmailClass, email=email) user = user_email.user if user_email else None else: user = self.db_adapter.ifind_first_object(self.UserClass, email=email) user_email = user return (user, user_email)
python
{ "resource": "" }
q33094
DBManager.get_user_by_id
train
def get_user_by_id(self, id): """Retrieve a User object by ID.""" return self.db_adapter.get_object(self.UserClass, id=id)
python
{ "resource": "" }
q33095
DBManager.get_user_invitation_by_id
train
def get_user_invitation_by_id(self, id): """Retrieve a UserInvitation object by ID.""" return self.db_adapter.get_object(self.UserInvitationClass, id=id)
python
{ "resource": "" }
q33096
DBManager.get_user_roles
train
def get_user_roles(self, user): """Retrieve a list of user role names. .. note:: Database management methods. """ # For SQL: user.roles is list of pointers to Role objects if isinstance(self.db_adapter, SQLDbAdapter): # user.roles is a list of Role IDs user_roles = [role.name for role in user.roles] # For others: user.roles is a list of role names else: # user.roles is a list of role names user_roles = user.roles return user_roles
python
{ "resource": "" }
q33097
DBManager.save_user_and_user_email
train
def save_user_and_user_email(self, user, user_email): """Save the User and UserEmail object.""" if self.UserEmailClass: self.db_adapter.save_object(user_email) self.db_adapter.save_object(user)
python
{ "resource": "" }
q33098
DBManager.user_has_confirmed_email
train
def user_has_confirmed_email(self, user): """| Return True if user has a confirmed email. | Return False otherwise.""" if not self.user_manager.USER_ENABLE_EMAIL: return True if not self.user_manager.USER_ENABLE_CONFIRM_EMAIL: return True db_adapter = self.db_adapter # Handle multiple emails per user: Find at least one confirmed email if self.UserEmailClass: has_confirmed_email = False user_emails = db_adapter.find_objects(self.UserEmailClass, user_id=user.id) for user_email in user_emails: if user_email.email_confirmed_at: has_confirmed_email = True break # Handle single email per user else: has_confirmed_email = True if user.email_confirmed_at else False return has_confirmed_email
python
{ "resource": "" }
q33099
DBManager.username_is_available
train
def username_is_available(self, new_username): """Check if ``new_username`` is still available. | Returns True if ``new_username`` does not exist or belongs to the current user. | Return False otherwise. """ # Return True if new_username equals current user's username if self.user_manager.call_or_get(current_user.is_authenticated): if new_username == current_user.username: return True # Return True if new_username does not exist, # Return False otherwise. return self.find_user_by_username(new_username) == None
python
{ "resource": "" }