_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q35200
NewsApiClient.get_top_headlines
train
def get_top_headlines(self, q=None, sources=None, language='en', country=None, category=None, page_size=None, page=None):
    """Return live top and breaking headlines for a country, a specific
    category in a country, a single source, or multiple sources.

    Optional parameters:
        (str) q - return headlines with a specific keyword or phrase, e.g.
            'bitcoin', 'trump', 'tesla', 'ethereum'.
        (str) sources - comma-separated news-source identifiers, e.g.
            'bbc-news', 'the-verge', 'abc-news', 'associated-press', 'wired',
            'bloomberg', 'business-insider', 'engadget', 'google-news',
            'hacker-news', 'info-money', 'recode', 'techcrunch', 'techradar'.
            Cannot be combined with `country` or `category`.
        (str) language - 2-letter ISO-639-1 language code.  Valid values are:
            'ar','de','en','es','fr','he','it','nl','no','pt','ru','se','ud','zh'
        (str) country - 2-letter ISO 3166-1 country code.  Valid values are:
            'ae','ar','at','au','be','bg','br','ca','ch','cn','co','cu','cz','de','eg','fr','gb','gr',
            'hk','hu','id','ie','il','in','it','jp','kr','lt','lv','ma','mx','my','ng','nl','no','nz',
            'ph','pl','pt','ro','rs','ru','sa','se','sg','si','sk','th','tr','tw','ua','us'
        (str) category - headline category.  Valid values are:
            'business','entertainment','general','health','science','sports','technology'
        (int) page_size - number of results per page; 20 is the default,
            100 is the maximum.
        (int) page - page through the results when the total number found is
            greater than page_size.

    Raises:
        TypeError: when a parameter has the wrong type.
        ValueError: when a parameter value is invalid, or when `sources` is
            mixed with `country`/`category`.
        NewsAPIException: when the API responds with a non-OK status code.
    """
    # Define Payload
    payload = {}

    # Keyword/Phrase
    if q is not None:
        if type(q) == str:
            payload['q'] = q
        else:
            raise TypeError('keyword/phrase q param should be of type str')

    # The API rejects requests mixing sources with country/category,
    # so fail fast before sending anything.
    if (sources is not None) and ((country is not None) or (category is not None)):
        raise ValueError('cannot mix country/category param with sources param.')

    # Sources
    if sources is not None:
        if type(sources) == str:
            payload['sources'] = sources
        else:
            raise TypeError('sources param should be of type str')

    # Language
    if language is not None:
        if type(language) == str:
            if language in const.languages:
                payload['language'] = language
            else:
                raise ValueError('invalid language')
        else:
            raise TypeError('language param should be of type str')

    # Country
    if country is not None:
        if type(country) == str:
            if country in const.countries:
                payload['country'] = country
            else:
                raise ValueError('invalid country')
        else:
            raise TypeError('country param should be of type str')

    # Category
    if category is not None:
        if type(category) == str:
            if category in const.categories:
                payload['category'] = category
            else:
                raise ValueError('invalid category')
        else:
            raise TypeError('category param should be of type str')

    # Page Size
    if page_size is not None:
        if type(page_size) == int:
            # BUGFIX: was `0 <= page_size <= 100`, which accepted 0 even
            # though the error message (and the API minimum) is 1.
            if 0 < page_size <= 100:
                payload['pageSize'] = page_size
            else:
                raise ValueError('page_size param should be an int between 1 and 100')
        else:
            raise TypeError('page_size param should be an int')

    # Page
    if page is not None:
        if type(page) == int:
            if page > 0:
                payload['page'] = page
            else:
                raise ValueError('page param should be an int greater than 0')
        else:
            raise TypeError('page param should be an int')

    # Send Request
    r = requests.get(const.TOP_HEADLINES_URL, auth=self.auth, timeout=30, params=payload)

    # Check Status of Request
    if r.status_code != requests.codes.ok:
        raise NewsAPIException(r.json())

    return r.json()
python
{ "resource": "" }
q35201
NewsApiClient.get_sources
train
def get_sources(self, category=None, language=None, country=None): """ Returns the subset of news publishers that top headlines... Optional parameters: (str) category - The category you want to get headlines for! Valid values are: 'business','entertainment','general','health','science','sports','technology' (str) language - The 2-letter ISO-639-1 code of the language you want to get headlines for. Valid values are: 'ar','de','en','es','fr','he','it','nl','no','pt','ru','se','ud','zh' (str) country - The 2-letter ISO 3166-1 code of the country you want to get headlines! Valid values are: 'ae','ar','at','au','be','bg','br','ca','ch','cn','co','cu','cz','de','eg','fr','gb','gr', 'hk','hu','id','ie','il','in','it','jp','kr','lt','lv','ma','mx','my','ng','nl','no','nz', 'ph','pl','pt','ro','rs','ru','sa','se','sg','si','sk','th','tr','tw','ua','us' (str) category - The category you want to get headlines for! Valid values are: 'business','entertainment','general','health','science','sports','technology' """ # Define Payload payload = {} # Language if language is not None: if type(language) == str: if language in const.languages: payload['language'] = language else: raise ValueError('invalid language') else: raise TypeError('language param should be of type str') # Country if country is not None: if type(country) == str: if country in const.countries: payload['country'] = country else: raise ValueError('invalid country') else: raise TypeError('country param should be of type str') # Category if category is not None: if type(category) == str: if category in const.categories: payload['category'] = category else: raise ValueError('invalid category') else: raise TypeError('category param should be of type str') # Send Request r = requests.get(const.SOURCES_URL, auth=self.auth, timeout=30, params=payload) # Check Status of Request if r.status_code != requests.codes.ok: raise NewsAPIException(r.json()) return r.json()
python
{ "resource": "" }
q35202
setup_logger
train
def setup_logger(name=None, logfile=None, level=logging.DEBUG, formatter=None, maxBytes=0, backupCount=0, fileLoglevel=None, disableStderrLogger=False):
    """
    Configure and return a fully configured logger instance, no hassles.

    If a logger with the specified name already exists, the existing instance
    is returned (its handlers are reconfigured in place); else a new one is
    created.

    If you set the ``logfile`` parameter with a filename, the logger will save
    messages to the logfile, but does not rotate by default.  To enable log
    rotation, set both ``maxBytes`` and ``backupCount``.

    Usage:

    .. code-block:: python

        from logzero import setup_logger
        logger = setup_logger()
        logger.info("hello")

    :arg string name: Name of the Logger object.  Multiple calls to
        ``setup_logger()`` with the same name always return a reference to the
        same Logger object.  (default: ``__name__``)
    :arg string logfile: If set, also write logs to the specified filename.
    :arg int level: Minimum logging-level to display (default: ``logging.DEBUG``).
    :arg Formatter formatter: Python ``logging.Formatter`` object (by default
        uses the internal ``LogFormatter``).
    :arg int maxBytes: Size of the logfile when rollover should occur.
        Defaults to 0, rollover never occurs.
    :arg int backupCount: Number of backups to keep.  Defaults to 0, rollover
        never occurs.
    :arg int fileLoglevel: Minimum logging-level for the file logger (if not
        set, the ``level`` argument is used).
    :arg bool disableStderrLogger: Should the default stderr logger be
        disabled.  Defaults to False.
    :return: A fully configured Python ``logging.Logger`` object.
    """
    _logger = logging.getLogger(name or __name__)
    # Don't let records bubble up to the root logger; this logger is
    # self-contained with its own handlers.
    _logger.propagate = False
    _logger.setLevel(level)

    # Reconfigure existing handlers.  Iterate over a copy because handlers
    # may be removed from the live list while we walk it.
    stderr_stream_handler = None
    for handler in list(_logger.handlers):
        if hasattr(handler, LOGZERO_INTERNAL_LOGGER_ATTR):
            if isinstance(handler, logging.FileHandler):
                # Internal FileHandler needs to be removed and re-setup to be able
                # to set a new logfile.
                _logger.removeHandler(handler)
                continue
            elif isinstance(handler, logging.StreamHandler):
                # Remember the internal stderr handler so we can remove or
                # keep it below depending on `disableStderrLogger`.
                stderr_stream_handler = handler

        # reconfigure handler (applies to external handlers too)
        handler.setLevel(level)
        handler.setFormatter(formatter or LogFormatter())

    # remove the stderr handler (stream_handler) if disabled
    if disableStderrLogger:
        if stderr_stream_handler is not None:
            _logger.removeHandler(stderr_stream_handler)
    elif stderr_stream_handler is None:
        # No internal stderr handler yet: create one and tag it as internal
        # so future setup_logger() calls can find and reconfigure it.
        stderr_stream_handler = logging.StreamHandler()
        setattr(stderr_stream_handler, LOGZERO_INTERNAL_LOGGER_ATTR, True)
        stderr_stream_handler.setLevel(level)
        stderr_stream_handler.setFormatter(formatter or LogFormatter())
        _logger.addHandler(stderr_stream_handler)

    if logfile:
        # File output is re-created on every call (old internal FileHandler
        # was removed above), so a new logfile path takes effect immediately.
        rotating_filehandler = RotatingFileHandler(filename=logfile, maxBytes=maxBytes, backupCount=backupCount)
        setattr(rotating_filehandler, LOGZERO_INTERNAL_LOGGER_ATTR, True)
        rotating_filehandler.setLevel(fileLoglevel or level)
        # color=False: ANSI escape codes don't belong in a logfile.
        rotating_filehandler.setFormatter(formatter or LogFormatter(color=False))
        _logger.addHandler(rotating_filehandler)

    return _logger
python
{ "resource": "" }
q35203
to_unicode
train
def to_unicode(value): """ Converts a string argument to a unicode string. If the argument is already a unicode string or None, it is returned unchanged. Otherwise it must be a byte string and is decoded as utf8. """ if isinstance(value, _TO_UNICODE_TYPES): return value if not isinstance(value, bytes): raise TypeError( "Expected bytes, unicode, or None; got %r" % type(value)) return value.decode("utf-8")
python
{ "resource": "" }
q35204
reset_default_logger
train
def reset_default_logger(): """ Resets the internal default logger to the initial configuration """ global logger global _loglevel global _logfile global _formatter _loglevel = logging.DEBUG _logfile = None _formatter = None logger = setup_logger(name=LOGZERO_DEFAULT_LOGGER, logfile=_logfile, level=_loglevel, formatter=_formatter)
python
{ "resource": "" }
q35205
slerp
train
def slerp(R1, R2, t1, t2, t_out): """Spherical linear interpolation of rotors This function uses a simpler interface than the more fundamental `slerp_evaluate` and `slerp_vectorized` functions. The latter are fast, being implemented at the C level, but take input `tau` instead of time. This function adjusts the time accordingly. Parameters ---------- R1: quaternion Quaternion at beginning of interpolation R2: quaternion Quaternion at end of interpolation t1: float Time corresponding to R1 t2: float Time corresponding to R2 t_out: float or array of floats Times to which the rotors should be interpolated """ tau = (t_out-t1)/(t2-t1) return np.slerp_vectorized(R1, R2, tau)
python
{ "resource": "" }
q35206
squad
train
def squad(R_in, t_in, t_out):
    """Spherical "quadrangular" interpolation of rotors with a cubic spline

    This is the best way to interpolate rotations.  It uses the analog of a
    cubic spline, except that the interpolant is confined to the rotor
    manifold in a natural way.  Alternative methods involving interpolation
    of other coordinates on the rotation group or normalization of
    interpolated values give bad results.  The results from this method are
    as natural as any, and are continuous in first and second derivatives.

    The input `R_in` rotors are assumed to be reasonably continuous (no sign
    flips), and the input `t` arrays are assumed to be sorted.  No checking
    is done for either case, and you may get silently bad results if these
    conditions are violated.

    This function simplifies the calling, compared to `squad_evaluate`
    (which takes a set of four quaternions forming the edges of the
    "quadrangle", and the normalized time `tau`) and `squad_vectorized`
    (which takes the same arguments, but in array form, and efficiently
    loops over them).

    Parameters
    ----------
    R_in: array of quaternions
        A time-series of rotors (unit quaternions) to be interpolated
    t_in: array of float
        The times corresponding to R_in
    t_out: array of float
        The times to which R_in should be interpolated

    """
    if R_in.size == 0 or t_out.size == 0:
        return np.array((), dtype=np.quaternion)

    # This list contains an index for each `t_out` such that
    # t_in[i-1] <= t_out < t_in[i]
    # Note that `side='right'` is much faster in my tests
    # i_in_for_out = t_in.searchsorted(t_out, side='left')
    # np.clip(i_in_for_out, 0, len(t_in) - 1, out=i_in_for_out)
    i_in_for_out = t_in.searchsorted(t_out, side='right')-1

    # Now, for each index `i` in `i_in`, we need to compute the
    # interpolation "coefficients" (`A_i`, `B_ip1`).
    #
    # I previously tested an explicit version of the loops below,
    # comparing `stride_tricks.as_strided` with explicit
    # implementation via `roll` (as seen here).  I found that the
    # `roll` was significantly more efficient for simple calculations,
    # though the difference is probably totally washed out here.  In
    # any case, it might be useful to test again.
    #
    A = R_in * np.exp((- np.log((~R_in) * np.roll(R_in, -1))
                       + np.log((~np.roll(R_in, 1)) * R_in) * ((np.roll(t_in, -1) - t_in) / (t_in - np.roll(t_in, 1)))
                       ) * 0.25)
    B = np.roll(R_in, -1) * np.exp((np.log((~np.roll(R_in, -1)) * np.roll(R_in, -2))
                                    * ((np.roll(t_in, -1) - t_in) / (np.roll(t_in, -2) - np.roll(t_in, -1)))
                                    - np.log((~R_in) * np.roll(R_in, -1))) * -0.25)

    # Correct the first and last A time steps, and last two B time steps.  We
    # extend R_in with the following wrap-around values:
    # R_in[0-1] = R_in[0]*(~R_in[1])*R_in[0]
    # R_in[n+0] = R_in[-1] * (~R_in[-2]) * R_in[-1]
    # R_in[n+1] = R_in[0] * (~R_in[-1]) * R_in[0]
    #           = R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1]
    #           = R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1]
    #
    # A[i] = R_in[i] * np.exp((- np.log((~R_in[i]) * R_in[i+1])
    #                          + np.log((~R_in[i-1]) * R_in[i]) * ((t_in[i+1] - t_in[i]) / (t_in[i] - t_in[i-1]))
    #                          ) * 0.25)
    # A[0] = R_in[0] * np.exp((- np.log((~R_in[0]) * R_in[1]) + np.log((~R_in[0])*R_in[1]*(~R_in[0])) * R_in[0]) * 0.25)
    #      = R_in[0]
    A[0] = R_in[0]
    # A[-1] = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[n+0])
    #                            + np.log((~R_in[-2]) * R_in[-1]) * ((t_in[n+0] - t_in[-1]) / (t_in[-1] - t_in[-2]))
    #                            ) * 0.25)
    #       = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[n+0]) + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
    #       = R_in[-1] * np.exp((- np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
    #                            + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
    #       = R_in[-1] * np.exp((- np.log((~R_in[-2]) * R_in[-1]) + np.log((~R_in[-2]) * R_in[-1])) * 0.25)
    #       = R_in[-1]
    A[-1] = R_in[-1]
    # B[i] = R_in[i+1] * np.exp((np.log((~R_in[i+1]) * R_in[i+2]) * ((t_in[i+1] - t_in[i]) / (t_in[i+2] - t_in[i+1]))
    #                            - np.log((~R_in[i]) * R_in[i+1])) * -0.25)
    # B[-2] = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[0]) * ((t_in[-1] - t_in[-2]) / (t_in[0] - t_in[-1]))
    #                            - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
    #       = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[0]) - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
    #       = R_in[-1] * np.exp((np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
    #                            - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
    #       = R_in[-1] * np.exp((np.log((~R_in[-2]) * R_in[-1]) - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
    #       = R_in[-1]
    B[-2] = R_in[-1]
    # B[-1] = R_in[0]
    # B[-1] = R_in[0] * np.exp((np.log((~R_in[0]) * R_in[1]) - np.log((~R_in[-1]) * R_in[0])) * -0.25)
    #       = R_in[-1] * (~R_in[-2]) * R_in[-1]
    #         * np.exp((np.log((~(R_in[-1] * (~R_in[-2]) * R_in[-1])) * R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
    #                   - np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])) * -0.25)
    #       = R_in[-1] * (~R_in[-2]) * R_in[-1]
    #         * np.exp((np.log(((~R_in[-1]) * R_in[-2] * (~R_in[-1])) * R_in[-1] * (~R_in[-2]) * R_in[-1] * (~R_in[-2]) * R_in[-1])
    #                   - np.log((~R_in[-1]) * R_in[-1] * (~R_in[-2]) * R_in[-1])) * -0.25)
    #         * np.exp((np.log((~R_in[-2]) * R_in[-1])
    #                   - np.log((~R_in[-2]) * R_in[-1])) * -0.25)
    B[-1] = R_in[-1] * (~R_in[-2]) * R_in[-1]

    # Use the coefficients at the corresponding t_out indices to
    # compute the squad interpolant
    # R_ip1 = np.array(np.roll(R_in, -1)[i_in_for_out])
    # R_ip1[-1] = R_in[-1]*(~R_in[-2])*R_in[-1]
    R_ip1 = np.roll(R_in, -1)
    R_ip1[-1] = R_in[-1]*(~R_in[-2])*R_in[-1]
    R_ip1 = np.array(R_ip1[i_in_for_out])
    # Extrapolate the final time step so tau stays well-defined when
    # t_out lands in the last input interval.
    t_inp1 = np.roll(t_in, -1)
    t_inp1[-1] = t_in[-1] + (t_in[-1] - t_in[-2])
    tau = (t_out - t_in[i_in_for_out]) / ((t_inp1 - t_in)[i_in_for_out])
    # tau = (t_out - t_in[i_in_for_out]) / ((np.roll(t_in, -1) - t_in)[i_in_for_out])
    R_out = np.squad_vectorized(tau, R_in[i_in_for_out], A[i_in_for_out], B[i_in_for_out], R_ip1)

    return R_out
python
{ "resource": "" }
q35207
integrate_angular_velocity
train
def integrate_angular_velocity(Omega, t0, t1, R0=None, tolerance=1e-12):
    """Compute frame with given angular velocity

    Parameters
    ==========
    Omega: tuple or callable
        Angular velocity from which to compute frame.  Can be
          1) a 2-tuple of float arrays (t, v) giving the angular velocity
             vector at a series of times,
          2) a function of time that returns the 3-vector angular velocity, or
          3) a function of time and orientation (t, R) that returns the
             3-vector angular velocity
        In case 1, the angular velocity will be interpolated to the required
        times.  Note that accuracy is poor in case 1.
    t0: float
        Initial time
    t1: float
        Final time
    R0: quaternion, optional
        Initial frame orientation.  Defaults to 1 (the identity orientation).
    tolerance: float, optional
        Absolute tolerance used in integration.  Defaults to 1e-12.

    Returns
    =======
    t: float array
    R: quaternion array

    """
    import warnings
    from scipy.integrate import ode

    if R0 is None:
        R0 = quaternion.one

    input_is_tabulated = False

    # Dispatch on the form of `Omega`: try case 1 (tabulated (t, v) pair)
    # first; if unpacking/spline-building fails, fall back to the callable
    # cases.  Each candidate is probed with a trial call at (t0, R0).
    try:
        t_Omega, v = Omega
        from scipy.interpolate import InterpolatedUnivariateSpline
        Omega_x = InterpolatedUnivariateSpline(t_Omega, v[:, 0])
        Omega_y = InterpolatedUnivariateSpline(t_Omega, v[:, 1])
        Omega_z = InterpolatedUnivariateSpline(t_Omega, v[:, 2])

        def Omega_func(t, R):
            return [Omega_x(t), Omega_y(t), Omega_z(t)]

        Omega_func(t0, R0)
        input_is_tabulated = True
    except (TypeError, ValueError):
        # Case 3: callable of (t, R)
        def Omega_func(t, R):
            return Omega(t, R)

        try:
            Omega_func(t0, R0)
        except TypeError:
            # Case 2: callable of t only
            def Omega_func(t, R):
                return Omega(t)

            Omega_func(t0, R0)

    def RHS(t, y):
        # Frame evolution: dR/dt = (1/2) * Omega * R, with Omega as a
        # pure-vector quaternion; integrate over the 4 float components.
        R = quaternion.quaternion(*y)
        return (0.5 * quaternion.quaternion(0.0, *Omega_func(t, R)) * R).components

    y0 = R0.components

    if input_is_tabulated:
        # Tabulated input: evaluate the solution exactly at the input times.
        from scipy.integrate import solve_ivp
        t = t_Omega
        t_span = [t_Omega[0], t_Omega[-1]]
        solution = solve_ivp(RHS, t_span, y0, t_eval=t_Omega, atol=tolerance, rtol=100*np.finfo(float).eps)
        R = quaternion.from_float_array(solution.y.T)
    else:
        # Callable input: step the dop853 integrator adaptively and record
        # every accepted step.
        solver = ode(RHS)
        solver.set_initial_value(y0, t0)
        # nsteps=1 forces single-step mode so we can collect intermediate
        # points; rtol=0.0 makes `tolerance` a purely absolute criterion.
        solver.set_integrator('dop853', nsteps=1, atol=tolerance, rtol=0.0)
        solver._integrator.iwork[2] = -1  # suppress Fortran-printed warning
        t = appending_array((int(t1-t0),))
        t.append(solver.t)
        R = appending_array((int(t1-t0), 4))
        R.append(solver.y)
        # The single-step trick makes scipy emit a UserWarning per step;
        # silence it for the duration of the loop.
        warnings.filterwarnings("ignore", category=UserWarning)
        t_last = solver.t
        while solver.t < t1:
            solver.integrate(t1, step=True)
            if solver.t > t_last:
                t.append(solver.t)
                R.append(solver.y)
                t_last = solver.t
        warnings.resetwarnings()
        t = t.a
        R = quaternion.as_quat_array(R.a)

    return t, R
python
{ "resource": "" }
q35208
minimal_rotation
train
def minimal_rotation(R, t, iterations=2): """Adjust frame so that there is no rotation about z' axis The output of this function is a frame that rotates the z axis onto the same z' axis as the input frame, but with minimal rotation about that axis. This is done by pre-composing the input rotation with a rotation about the z axis through an angle gamma, where dgamma/dt = 2*(dR/dt * z * R.conjugate()).w This ensures that the angular velocity has no component along the z' axis. Note that this condition becomes easier to impose the closer the input rotation is to a minimally rotating frame, which means that repeated application of this function improves its accuracy. By default, this function is iterated twice, though a few more iterations may be called for. Parameters ========== R: quaternion array Time series describing rotation t: float array Corresponding times at which R is measured iterations: int [defaults to 2] Repeat the minimization to refine the result """ from scipy.interpolate import InterpolatedUnivariateSpline as spline if iterations == 0: return R R = quaternion.as_float_array(R) Rdot = np.empty_like(R) for i in range(4): Rdot[:, i] = spline(t, R[:, i]).derivative()(t) R = quaternion.from_float_array(R) Rdot = quaternion.from_float_array(Rdot) halfgammadot = quaternion.as_float_array(Rdot * quaternion.z * R.conjugate())[:, 0] halfgamma = spline(t, halfgammadot).antiderivative()(t) Rgamma = np.exp(quaternion.z * halfgamma) return minimal_rotation(R * Rgamma, t, iterations=iterations-1)
python
{ "resource": "" }
q35209
mean_rotor_in_chordal_metric
train
def mean_rotor_in_chordal_metric(R, t=None): """Return rotor that is closest to all R in the least-squares sense This can be done (quasi-)analytically because of the simplicity of the chordal metric function. The only approximation is the simple 2nd-order discrete formula for the definite integral of the input rotor function. Note that the `t` argument is optional. If it is present, the times are used to weight the corresponding integral. If it is not present, a simple sum is used instead (which may be slightly faster). """ if not t: return np.quaternion(*(np.sum(as_float_array(R)))).normalized() mean = np.empty((4,), dtype=float) definite_integral(as_float_array(R), t, mean) return np.quaternion(*mean).normalized()
python
{ "resource": "" }
q35210
as_float_array
train
def as_float_array(a): """View the quaternion array as an array of floats This function is fast (of order 1 microsecond) because no data is copied; the returned quantity is just a "view" of the original. The output view has one more dimension (of size 4) than the input array, but is otherwise the same shape. """ return np.asarray(a, dtype=np.quaternion).view((np.double, 4))
python
{ "resource": "" }
q35211
as_quat_array
train
def as_quat_array(a): """View a float array as an array of quaternions The input array must have a final dimension whose size is divisible by four (or better yet *is* 4), because successive indices in that last dimension will be considered successive components of the output quaternion. This function is usually fast (of order 1 microsecond) because no data is copied; the returned quantity is just a "view" of the original. However, if the input array is not C-contiguous (basically, as you increment the index into the last dimension of the array, you just move to the neighboring float in memory), the data will need to be copied which may be quite slow. Therefore, you should try to ensure that the input array is in that order. Slices and transpositions will frequently break that rule. We will not convert back from a two-spinor array because there is no unique convention for them, so I don't want to mess with that. Also, we want to discourage users from the slow, memory-copying process of swapping columns required for useful definitions of the two-spinors. """ a = np.asarray(a, dtype=np.double) # fast path if a.shape == (4,): return quaternion(a[0], a[1], a[2], a[3]) # view only works if the last axis is C-contiguous if not a.flags['C_CONTIGUOUS'] or a.strides[-1] != a.itemsize: a = a.copy(order='C') try: av = a.view(np.quaternion) except ValueError as e: message = (str(e) + '\n ' + 'Failed to view input data as a series of quaternions. ' + 'Please ensure that the last dimension has size divisible by 4.\n ' + 'Input data has shape {0} and dtype {1}.'.format(a.shape, a.dtype)) raise ValueError(message) # special case: don't create an axis for a single quaternion, to # match the output of `as_float_array` if av.shape[-1] == 1: av = av.reshape(a.shape[:-1]) return av
python
{ "resource": "" }
q35212
as_spinor_array
train
def as_spinor_array(a): """View a quaternion array as spinors in two-complex representation This function is relatively slow and scales poorly, because memory copying is apparently involved -- I think it's due to the "advanced indexing" required to swap the columns. """ a = np.atleast_1d(a) assert a.dtype == np.dtype(np.quaternion) # I'm not sure why it has to be so complicated, but all of these steps # appear to be necessary in this case. return a.view(np.float).reshape(a.shape + (4,))[..., [0, 3, 2, 1]].ravel().view(np.complex).reshape(a.shape + (2,))
python
{ "resource": "" }
q35213
from_rotation_vector
train
def from_rotation_vector(rot): """Convert input 3-vector in axis-angle representation to unit quaternion Parameters ---------- rot: (Nx3) float array Each vector represents the axis of the rotation, with norm proportional to the angle of the rotation in radians. Returns ------- q: array of quaternions Unit quaternions resulting in rotations corresponding to input rotations. Output shape is rot.shape[:-1]. """ rot = np.array(rot, copy=False) quats = np.zeros(rot.shape[:-1]+(4,)) quats[..., 1:] = rot[...]/2 quats = as_quat_array(quats) return np.exp(quats)
python
{ "resource": "" }
q35214
as_euler_angles
train
def as_euler_angles(q): """Open Pandora's Box If somebody is trying to make you use Euler angles, tell them no, and walk away, and go and tell your mum. You don't want to use Euler angles. They are awful. Stay away. It's one thing to convert from Euler angles to quaternions; at least you're moving in the right direction. But to go the other way?! It's just not right. Assumes the Euler angles correspond to the quaternion R via R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2) The angles are naturally in radians. NOTE: Before opening an issue reporting something "wrong" with this function, be sure to read all of the following page, *especially* the very last section about opening issues or pull requests. <https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible> Parameters ---------- q: quaternion or array of quaternions The quaternion(s) need not be normalized, but must all be nonzero Returns ------- alpha_beta_gamma: float array Output shape is q.shape+(3,). These represent the angles (alpha, beta, gamma) in radians, where the normalized input quaternion represents `exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)`. Raises ------ AllHell ...if you try to actually use Euler angles, when you could have been using quaternions like a sensible person. """ alpha_beta_gamma = np.empty(q.shape + (3,), dtype=np.float) n = np.norm(q) q = as_float_array(q) alpha_beta_gamma[..., 0] = np.arctan2(q[..., 3], q[..., 0]) + np.arctan2(-q[..., 1], q[..., 2]) alpha_beta_gamma[..., 1] = 2*np.arccos(np.sqrt((q[..., 0]**2 + q[..., 3]**2)/n)) alpha_beta_gamma[..., 2] = np.arctan2(q[..., 3], q[..., 0]) - np.arctan2(-q[..., 1], q[..., 2]) return alpha_beta_gamma
python
{ "resource": "" }
q35215
from_euler_angles
train
def from_euler_angles(alpha_beta_gamma, beta=None, gamma=None):
    """Improve your life drastically

    Assumes the Euler angles correspond to the quaternion R via

        R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)

    The angles naturally must be in radians for this to make any sense.

    NOTE: Before opening an issue reporting something "wrong" with this
    function, be sure to read all of the following page, *especially* the
    very last section about opening issues or pull requests.
    <https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>

    Parameters
    ----------
    alpha_beta_gamma: float or array of floats
        This argument may either contain an array with last dimension of
        size 3, where those three elements describe the (alpha, beta, gamma)
        radian values for each rotation; or it may contain just the alpha
        values, in which case the next two arguments must also be given.
    beta: None, float, or array of floats
        If this array is given, it must be able to broadcast against the
        first and third arguments.
    gamma: None, float, or array of floats
        If this array is given, it must be able to broadcast against the
        first and second arguments.

    Returns
    -------
    R: quaternion array
        The shape of this array will be the same as the input, except that
        the last dimension will be removed.

    """
    # Accept either a packed (..., 3) array or three separate angle arrays.
    if gamma is None:
        angles = np.asarray(alpha_beta_gamma, dtype=np.double)
        alpha, beta, gamma = angles[..., 0], angles[..., 1], angles[..., 2]
    else:
        alpha = np.asarray(alpha_beta_gamma, dtype=np.double)
        beta = np.asarray(beta, dtype=np.double)
        gamma = np.asarray(gamma, dtype=np.double)

    # Half-angle terms shared by all four components.
    cos_half_beta = np.cos(beta/2)
    sin_half_beta = np.sin(beta/2)
    half_sum = (alpha+gamma)/2
    half_diff = (alpha-gamma)/2

    # Assemble the float components, then reinterpret as quaternions.
    R = np.empty(np.broadcast(alpha, beta, gamma).shape + (4,), dtype=np.double)
    R[..., 0] = cos_half_beta*np.cos(half_sum)    # scalar component
    R[..., 1] = -sin_half_beta*np.sin(half_diff)  # x component
    R[..., 2] = sin_half_beta*np.cos(half_diff)   # y component
    R[..., 3] = cos_half_beta*np.sin(half_sum)    # z component
    return as_quat_array(R)
python
{ "resource": "" }
q35216
from_spherical_coords
train
def from_spherical_coords(theta_phi, phi=None): """Return the quaternion corresponding to these spherical coordinates Assumes the spherical coordinates correspond to the quaternion R via R = exp(phi*z/2) * exp(theta*y/2) The angles naturally must be in radians for this to make any sense. Note that this quaternion rotates `z` onto the point with the given spherical coordinates, but also rotates `x` and `y` onto the usual basis vectors (theta and phi, respectively) at that point. Parameters ---------- theta_phi: float or array of floats This argument may either contain an array with last dimension of size 2, where those two elements describe the (theta, phi) values in radians for each point; or it may contain just the theta values in radians, in which case the next argument must also be given. phi: None, float, or array of floats If this array is given, it must be able to broadcast against the first argument. Returns ------- R: quaternion array If the second argument is not given to this function, the shape will be the same as the input shape except for the last dimension, which will be removed. If the second argument is given, this output array will have the shape resulting from broadcasting the two input arrays against each other. """ # Figure out the input angles from either type of input if phi is None: theta_phi = np.asarray(theta_phi, dtype=np.double) theta = theta_phi[..., 0] phi = theta_phi[..., 1] else: theta = np.asarray(theta_phi, dtype=np.double) phi = np.asarray(phi, dtype=np.double) # Set up the output array R = np.empty(np.broadcast(theta, phi).shape + (4,), dtype=np.double) # Compute the actual values of the quaternion components R[..., 0] = np.cos(phi/2)*np.cos(theta/2) # scalar quaternion components R[..., 1] = -np.sin(phi/2)*np.sin(theta/2) # x quaternion components R[..., 2] = np.cos(phi/2)*np.sin(theta/2) # y quaternion components R[..., 3] = np.sin(phi/2)*np.cos(theta/2) # z quaternion components return as_quat_array(R)
python
{ "resource": "" }
q35217
rotate_vectors
train
def rotate_vectors(R, v, axis=-1):
    """Rotate vectors by given quaternions

    For simplicity, this function simply converts the input quaternion(s) to
    a matrix, and rotates the input vector(s) by the usual matrix
    multiplication.  If each input quaternion rotates only a single vector,
    the direct formula v' = v + 2 * r x (s * v + r x v) / m (with s, r the
    scalar and vector parts and m the squared norm) uses fewer operations.

    Parameters
    ==========
    R: quaternion array
        Quaternions by which to rotate the input vectors
    v: float array
        Three-vectors to be rotated.
    axis: int
        Axis of the `v` array to use as the vector dimension.  This axis of
        `v` must have length 3.

    Returns
    =======
    vprime: float array
        The rotated vectors.  This array has shape R.shape+v.shape.
    """
    R = np.asarray(R, dtype=np.quaternion)
    v = np.asarray(v, dtype=float)

    # Validate that `v` really contains 3-vectors along the requested axis.
    if v.ndim < 1 or 3 not in v.shape:
        raise ValueError("Input `v` does not have at least one dimension of length 3")
    if v.shape[axis] != 3:
        raise ValueError("Input `v` axis {0} has length {1}, not 3.".format(axis, v.shape[axis]))

    # One 3x3 rotation matrix per quaternion; the last two axes of `m` are
    # the matrix (row, column) axes.
    m = as_rotation_matrix(R)

    # Assign distinct einsum axis labels: 0..m.ndim-1 for `m`, the rest for `v`.
    m_axes = list(range(m.ndim))
    v_axes = list(range(m.ndim, m.ndim+v.ndim))
    # Output axes = all of R's batch axes, then v's axes with the vector axis
    # replaced by the matrix *row* axis -- i.e. output shape R.shape+v.shape.
    mv_axes = list(v_axes)
    mv_axes[axis] = m_axes[-2]
    mv_axes = m_axes[:-2] + mv_axes
    # Contract the matrix *column* axis against v's vector axis.
    v_axes[axis] = m_axes[-1]
    return np.einsum(m, m_axes, v, v_axes, mv_axes)
python
{ "resource": "" }
q35218
isclose
train
def isclose(a, b, rtol=4*np.finfo(float).eps, atol=0.0, equal_nan=False):
    """
    Returns a boolean array where two arrays are element-wise equal within a
    tolerance.

    This is essentially a copy of `numpy.isclose`, with different default
    tolerances and minor changes necessary to deal correctly with
    quaternions.  The test applied to finite values is

        absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))

    which is not symmetric in `a` and `b`.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter.
    atol : float
        The absolute tolerance parameter.
    equal_nan : bool
        Whether to compare NaN's as equal.  If True, NaN's in `a` will be
        considered equal to NaN's in `b` in the output array.

    Returns
    -------
    y : array_like
        Boolean array of where `a` and `b` are equal within the given
        tolerance.  If both `a` and `b` are scalars, returns a single
        boolean value.

    See Also
    --------
    allclose
    """
    def within_tol(x, y, atol, rtol):
        # 'invalid' warnings are suppressed: abs() of non-finite values is
        # expected here and handled by the callers below.
        with np.errstate(invalid='ignore'):
            result = np.less_equal(abs(x-y), atol + rtol * abs(y))
        if np.isscalar(a) and np.isscalar(b):
            result = bool(result)
        return result

    # ndmin=1 so scalar inputs still support boolean indexing below;
    # scalar-ness of the original inputs is re-checked before returning.
    x = np.array(a, copy=False, subok=True, ndmin=1)
    y = np.array(b, copy=False, subok=True, ndmin=1)

    # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
    # This will cause casting of x later.  Also, make sure to allow
    # subclasses (e.g., for numpy.ma).  Quaternion dtypes cannot be combined
    # with 1. by result_type, hence the fallback.
    try:
        dt = np.result_type(y, 1.)
    except TypeError:
        dt = np.dtype(np.quaternion)
    y = np.array(y, dtype=dt, copy=False, subok=True)

    xfin = np.isfinite(x)
    yfin = np.isfinite(y)
    if np.all(xfin) and np.all(yfin):
        # Fast path: everything finite, no special-casing needed.
        return within_tol(x, y, atol, rtol)
    else:
        finite = xfin & yfin
        cond = np.zeros_like(finite, subok=True)
        # Because we're using boolean indexing, x & y must be the same shape.
        # Ideally, we'd just do x, y = broadcast_arrays(x, y).  It's in
        # lib.stride_tricks, though, so we can't import it here.
        x = x * np.ones_like(cond)
        y = y * np.ones_like(cond)
        # Avoid subtraction with infinite/nan values...
        cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
        # Check for equality of infinite values...
        cond[~finite] = (x[~finite] == y[~finite])
        if equal_nan:
            # Make NaN == NaN
            both_nan = np.isnan(x) & np.isnan(y)
            cond[both_nan] = both_nan[both_nan]
        if np.isscalar(a) and np.isscalar(b):
            return bool(cond)
        else:
            return cond
python
{ "resource": "" }
q35219
allclose
train
def allclose(a, b, rtol=4*np.finfo(float).eps, atol=0.0, equal_nan=False, verbose=False):
    """
    Returns True if two arrays are element-wise equal within a tolerance.

    Wrapper around `quaternion.isclose` that reduces its output to a single
    boolean: True when every element is close, False otherwise.  Note the
    default tolerances are stricter than `numpy.allclose`'s, and there is an
    extra `verbose` option.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter.
    atol : float
        The absolute tolerance parameter.
    equal_nan : bool
        Whether to compare NaN's as equal.  If True, NaN's in `a` will be
        considered equal to NaN's in `b`.
    verbose : bool
        If the result is False, print each non-close pair of elements.

    Returns
    -------
    allclose : bool
        True if the two arrays are equal within the given tolerance;
        False otherwise.

    See Also
    --------
    isclose, numpy.all, numpy.any, numpy.allclose
    """
    close = isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
    result = np.all(close)
    if verbose and not result:
        print('Non-close values:')
        mismatches = np.argwhere(close == False)
        for idx in mismatches:
            idx = tuple(idx)
            print('\n    x[{0}]={1}\n    y[{0}]={2}'.format(idx, a[idx], b[idx]))
    return result
python
{ "resource": "" }
q35220
derivative
train
def derivative(f, t):
    """Fourth-order finite-differencing with non-uniform time steps

    The finite-difference formula comes from Eq. (A 5b) of "Derivative
    formulas and errors for non-uniformly spaced points" by M. K. Bowen and
    Ronald Smith; per their Eqs. (B 9b) and (B 10b) it is fourth-order --
    a squishy concept with non-uniform time steps.

    TODO: If there are fewer than five points, the function should revert to
    simpler (lower-order) formulas.
    """
    # Dispatch on dimensionality; each worker fills `dfdt` in place.
    workers = {1: _derivative, 2: _derivative_2d, 3: _derivative_3d}
    worker = workers.get(f.ndim)
    if worker is None:
        raise NotImplementedError(
            "Taking derivatives of {0}-dimensional arrays is not yet implemented".format(f.ndim))
    dfdt = np.empty_like(f)
    worker(f, t, dfdt)
    return dfdt
python
{ "resource": "" }
q35221
autodiscover
train
def autodiscover():
    """
    Auto-discover INSTALLED_APPS translation.py modules and fail silently
    when not present.  This forces an import on them to register.  Also
    import the explicit modules listed in TRANSLATION_FILES.
    """
    import os
    import sys
    import copy
    from django.utils.module_loading import module_has_submodule
    from modeltranslation.translator import translator
    from modeltranslation.settings import TRANSLATION_FILES, DEBUG
    from importlib import import_module
    from django.apps import apps

    mods = [(app_config.name, app_config.module) for app_config in apps.get_app_configs()]

    for (app, mod) in mods:
        # Attempt to import the app's translation module.
        module = '%s.translation' % app
        # Snapshot the registry so a failed import can be rolled back cleanly.
        before_import_registry = copy.copy(translator._registry)
        try:
            import_module(module)
        except:
            # NOTE(review): bare `except` is intentional here -- any failure
            # during the import must roll back the registry; the error is
            # re-raised below only when the submodule actually exists.
            # Reset the model registry to the state before the last import as
            # this import will have to reoccur on the next request and this
            # could raise NotRegistered and AlreadyRegistered exceptions
            translator._registry = before_import_registry
            # Decide whether to bubble up this error. If the app just
            # doesn't have an translation module, we can ignore the error
            # attempting to import it, otherwise we want it to bubble up.
            if module_has_submodule(mod, 'translation'):
                raise

    # Explicitly configured translation modules are imported unconditionally.
    for module in TRANSLATION_FILES:
        import_module(module)

    # In debug mode, print a list of registered models and pid to stdout.
    # Note: Differing model order is fine, we don't rely on a particular
    # order, as far as base classes are registered before subclasses.
    if DEBUG:
        try:
            if sys.argv[1] in ('runserver', 'runserver_plus'):
                models = translator.get_registered_models()
                names = ', '.join(m.__name__ for m in models)
                print('modeltranslation: Registered %d models for translation'
                      ' (%s) [pid: %d].' % (len(models), names, os.getpid()))
        except IndexError:
            # No management subcommand given (e.g. plain `python manage.py`).
            pass
python
{ "resource": "" }
q35222
build_css_class
train
def build_css_class(localized_fieldname, prefix=''):
    """
    Return a CSS class derived from ``localized_fieldname``.

    The class keeps the field part and the language part separated by a
    dash so it is easily splittable, and it copes with regionalized
    language codes.  An optional ``prefix`` is prepended with a dash.
    """
    parts = localized_fieldname.split('_')
    if len(parts) == 1:
        # No language suffix at all.
        css_class = str(localized_fieldname)
    elif len(parts) == 2:
        # Fieldname without underscore plus a short language code:
        # 'foo_de' --> 'foo-de', 'bar_en' --> 'bar-en'
        css_class = '-'.join(parts)
    else:
        # Prefer a regionalized code ('foo_es_ar' --> 'foo-es_ar',
        # 'foo_bar_zh_tw' --> 'foo_bar-zh_tw'), falling back to a short
        # code ('foo_bar_de' --> 'foo_bar-de').
        css_class = _join_css_class(parts, 2) or _join_css_class(parts, 1)
    return '%s-%s' % (prefix, css_class) if prefix else css_class
python
{ "resource": "" }
q35223
unique
train
def unique(seq):
    """
    Yield each member of ``seq`` once, preserving first-seen order.

    A bare ``set`` would drop duplicates but lose ordering; this keeps it.

    >>> list(unique([1, 2, 3, 2, 2, 4, 1]))
    [1, 2, 3, 4]
    """
    seen = set()
    for item in seq:
        if item not in seen:
            seen.add(item)
            yield item
python
{ "resource": "" }
q35224
resolution_order
train
def resolution_order(lang, override=None):
    """
    Return the tuple of languages to try for ``lang``, fallbacks included.

    The requested language always comes first, followed by its specific
    fallbacks and then the default fallbacks.  Entries in ``override`` take
    precedence over ``settings.FALLBACK_LANGUAGES``.  When fallbacks are
    disabled, only ``lang`` itself is returned.
    """
    if not settings.ENABLE_FALLBACKS:
        return (lang,)
    if override is None:
        override = {}
    specific = override.get(lang, settings.FALLBACK_LANGUAGES.get(lang, ()))
    default = override.get('default', settings.FALLBACK_LANGUAGES['default'])
    return tuple(unique((lang,) + specific + default))
python
{ "resource": "" }
q35225
fallbacks
train
def fallbacks(enable=True):
    """
    Temporarily switch all language fallbacks on or off.

    Example:

        with fallbacks(False):
            lang_has_slug = bool(self.slug)

    May be used to enable fallbacks just when they're needed saving on some
    processing or check if there is a value for the current language (not
    knowing the language)
    """
    # NOTE(review): presumably decorated with @contextmanager at the (unseen)
    # definition site -- the bare ``yield`` only makes sense that way; confirm.
    current_enable_fallbacks = settings.ENABLE_FALLBACKS
    settings.ENABLE_FALLBACKS = enable
    try:
        yield
    finally:
        # Always restore the previous setting, even if the body raised.
        settings.ENABLE_FALLBACKS = current_enable_fallbacks
python
{ "resource": "" }
q35226
parse_field
train
def parse_field(setting, field_name, default):
    """
    Resolve a per-field value from a single-value or dict-type setting
    (such as ``fallback_values``).

    A dict setting is looked up by ``field_name`` (falling back to
    ``default`` when the field has no entry); any other value applies to
    all fields and is returned unchanged.
    """
    if not isinstance(setting, dict):
        return setting
    return setting.get(field_name, default)
python
{ "resource": "" }
q35227
append_translated
train
def append_translated(model, fields):
    """Expand ``fields`` with the translation fields of every translated field.

    Returns a set containing the original field names plus, for each name
    registered for translation on ``model``, the names of its per-language
    translation fields.
    """
    expanded = set(fields)
    from modeltranslation.translator import translator
    opts = translator.get_options_for_model(model)
    for name, translation_fields in opts.fields.items():
        if name in expanded:
            expanded |= {f.name for f in translation_fields}
    return expanded
python
{ "resource": "" }
q35228
MultilingualQuerySet._rewrite_col
train
def _rewrite_col(self, col):
    """Django >= 1.7 column name rewriting

    Recursively rewrites a compiled expression column so it references the
    translation field for the active language instead of the original field.
    """
    if isinstance(col, Col):
        new_name = rewrite_lookup_key(self.model, col.target.name)
        if col.target.name != new_name:
            new_field = self.model._meta.get_field(new_name)
            # Preserve aliasing: rewrite `source` only when it is the very
            # same object as `target`; `target` is always rewritten.
            if col.target is col.source:
                col.source = new_field
            col.target = new_field
    elif hasattr(col, 'col'):
        # Wrapped expression (e.g. a function over a column) -- recurse.
        self._rewrite_col(col.col)
    elif hasattr(col, 'lhs'):
        # Binary expression -- rewrite its left-hand side.
        self._rewrite_col(col.lhs)
python
{ "resource": "" }
q35229
MultilingualQuerySet._rewrite_where
train
def _rewrite_where(self, q):
    """
    Rewrite field names inside WHERE tree.
    """
    if isinstance(q, Lookup):
        # Leaf condition: rewrite the column on its left-hand side.
        self._rewrite_col(q.lhs)
    if isinstance(q, Node):
        # Interior node (AND/OR): recurse into every child condition.
        for child in q.children:
            self._rewrite_where(child)
python
{ "resource": "" }
q35230
MultilingualQuerySet._rewrite_q
train
def _rewrite_q(self, q):
    """Rewrite field names inside Q call.

    A ``(lookup, value)`` tuple is a leaf: its lookup key is rewritten to
    the localized field.  A ``Node`` has all of its children rewritten
    recursively, in place.
    """
    if isinstance(q, tuple) and len(q) == 2:
        lookup, value = q
        return rewrite_lookup_key(self.model, lookup), value
    if isinstance(q, Node):
        q.children = [self._rewrite_q(child) for child in q.children]
    return q
python
{ "resource": "" }
q35231
MultilingualQuerySet._rewrite_f
train
def _rewrite_f(self, q):
    """
    Rewrite field names inside F call.
    """
    if isinstance(q, models.F):
        q.name = rewrite_lookup_key(self.model, q.name)
        return q
    if isinstance(q, Node):
        q.children = list(map(self._rewrite_f, q.children))
    # Django >= 1.8: combined expressions keep their operands in lhs/rhs,
    # so rewrite both sides when present.
    if hasattr(q, 'lhs'):
        q.lhs = self._rewrite_f(q.lhs)
    if hasattr(q, 'rhs'):
        q.rhs = self._rewrite_f(q.rhs)
    return q
python
{ "resource": "" }
q35232
MultilingualQuerySet.order_by
train
def order_by(self, *field_names):
    """
    Change translatable field names in an ``order_by`` argument to
    translation fields for the current language.

    When rewriting is disabled on this queryset, the arguments are passed
    through to the parent implementation untouched.
    """
    if not self._rewrite:
        return super(MultilingualQuerySet, self).order_by(*field_names)
    # Map each ordering key (possibly '-' prefixed) onto its localized field.
    new_args = [rewrite_order_lookup_key(self.model, key) for key in field_names]
    return super(MultilingualQuerySet, self).order_by(*new_args)
python
{ "resource": "" }
q35233
add_translation_fields
train
def add_translation_fields(model, opts):
    """
    Monkey patches the original model class to provide additional fields for
    every language.

    Adds newly created translation fields to the given translation options.
    """
    # Per-field empty values may be given as a dict or one shared value;
    # NONE is the module's "not configured" sentinel.
    model_empty_values = getattr(opts, 'empty_values', NONE)
    for field_name in opts.local_fields.keys():
        field_empty_value = parse_field(model_empty_values, field_name, NONE)
        for l in mt_settings.AVAILABLE_LANGUAGES:
            # Create a dynamic translation field
            translation_field = create_translation_field(
                model=model, field_name=field_name, lang=l, empty_value=field_empty_value)
            # Construct the name for the localized field
            localized_field_name = build_localized_fieldname(field_name, l)
            # Check if the model already has a field by that name
            if hasattr(model, localized_field_name):
                # Check if are not dealing with abstract field inherited.
                for cls in model.__mro__:
                    if hasattr(cls, '_meta') and cls.__dict__.get(localized_field_name, None):
                        cls_opts = translator._get_options_for_model(cls)
                        # Only an abstract parent that itself declared the
                        # field for translation is allowed to clash.
                        if not cls._meta.abstract or field_name not in cls_opts.local_fields:
                            raise ValueError("Error adding translation field. Model '%s' already"
                                             " contains a field named '%s'." %
                                             (model._meta.object_name, localized_field_name))
            # This approach implements the translation fields as full valid
            # django model fields and therefore adds them via add_to_class
            model.add_to_class(localized_field_name, translation_field)
            opts.add_translation_field(field_name, translation_field)
    # Rebuild information about parents fields. If there are opts.local_fields, field cache would be
    # invalidated (by model._meta.add_field() function). Otherwise, we need to do it manually.
    if len(opts.local_fields) == 0:
        model._meta._expire_cache()
        model._meta.get_fields()
python
{ "resource": "" }
q35234
patch_clean_fields
train
def patch_clean_fields(model):
    """
    Patch clean_fields method to handle different form types submission.

    MT-aware forms may record translation fields marked for clearing on the
    instance (``_mt_form_pending_clear``); the patched ``clean_fields``
    reconciles those requests before delegating to the original method.
    """
    old_clean_fields = model.clean_fields

    def new_clean_fields(self, exclude=None):
        if hasattr(self, '_mt_form_pending_clear'):
            # Some form translation fields has been marked as clearing value.
            # Check if corresponding translated field was also saved (not excluded):
            # - if yes, it seems like form for MT-unaware app. Ignore clearing (left value from
            #   translated field unchanged), as if field was omitted from form
            # - if no, then proceed as normally: clear the field
            for field_name, value in self._mt_form_pending_clear.items():
                field = self._meta.get_field(field_name)
                orig_field_name = field.translated_field.name
                # Bug fix: `exclude` defaults to None; `in None` would raise
                # TypeError.  Treat None as "nothing excluded".
                if exclude is not None and orig_field_name in exclude:
                    field.save_form_data(self, value, check=False)
            delattr(self, '_mt_form_pending_clear')
        old_clean_fields(self, exclude)

    model.clean_fields = new_clean_fields
python
{ "resource": "" }
q35235
patch_related_object_descriptor_caching
train
def patch_related_object_descriptor_caching(ro_descriptor):
    """
    Patch SingleRelatedObjectDescriptor or ReverseSingleRelatedObjectDescriptor
    to use language-aware caching.
    """
    # Mixin first in the MRO so the language-aware cache naming wins.
    class NewSingleObjectDescriptor(LanguageCacheSingleObjectDescriptor, ro_descriptor.__class__):
        pass

    # Django 2.x resolves the cache slot through related.get_cache_name();
    # bind the language-aware implementation onto this descriptor instance.
    if django.VERSION[0] == 2:
        ro_descriptor.related.get_cache_name = partial(
            NewSingleObjectDescriptor.get_cache_name,
            ro_descriptor,
        )
        ro_descriptor.accessor = ro_descriptor.related.get_accessor_name()
    # Swap the instance's class so attribute access goes through the mixin.
    ro_descriptor.__class__ = NewSingleObjectDescriptor
python
{ "resource": "" }
q35236
TranslationOptions.validate
train
def validate(self):
    """
    Perform options validation.

    Currently only ``required_languages`` is checked; it may be a plain
    sequence of language codes, or a dict mapping language codes (plus the
    special 'default' key) to sequences of field names.
    """
    # TODO: validate the remaining options as well.
    if not self.required_languages:
        return
    if isinstance(self.required_languages, (tuple, list)):
        self._check_languages(self.required_languages)
        return
    self._check_languages(self.required_languages.keys(), extra=('default',))
    for fieldnames in self.required_languages.values():
        if any(f not in self.fields for f in fieldnames):
            raise ImproperlyConfigured(
                'Fieldname in required_languages which is not in fields option.')
python
{ "resource": "" }
q35237
TranslationOptions.update
train
def update(self, other):
    """
    Merge options inherited from a superclass into this instance.

    Only abstract parents contribute; concrete superclasses manage their
    own translation fields.
    """
    if not other.model._meta.abstract:
        return
    self.local_fields.update(other.local_fields)
    self.fields.update(other.fields)
python
{ "resource": "" }
q35238
TranslationOptions.add_translation_field
train
def add_translation_field(self, field, translation_field):
    """
    Record ``translation_field`` under ``field`` in both registries:
    the per-class ``local_fields`` and the inherited-inclusive ``fields``.
    """
    for registry in (self.local_fields, self.fields):
        registry[field].add(translation_field)
python
{ "resource": "" }
q35239
Translator.get_registered_models
train
def get_registered_models(self, abstract=True):
    """
    Return all registered models; with ``abstract=False``, only the
    concrete registered models.
    """
    return [
        model
        for model, opts in self._registry.items()
        if opts.registered and (abstract or not model._meta.abstract)
    ]
python
{ "resource": "" }
q35240
Translator._get_options_for_model
train
def _get_options_for_model(self, model, opts_class=None, **options):
    """
    Returns an instance of translation options with translated fields
    defined for the ``model`` and inherited from superclasses.

    Results are cached in ``self._registry``; an entry is built on first
    request by merging options from all parents that look like models.
    """
    if model not in self._registry:
        # Create a new type for backwards compatibility.
        opts = type("%sTranslationOptions" % model.__name__,
                    (opts_class or TranslationOptions,), options)(model)
        # Fields for translation may be inherited from abstract
        # superclasses, so we need to look at all parents.
        for base in model.__bases__:
            if not hasattr(base, '_meta'):
                # Things without _meta aren't functional models, so they're
                # uninteresting parents.
                continue
            # Recursion also populates the cache for the parent itself.
            opts.update(self._get_options_for_model(base))
        # Cache options for all models -- we may want to compute options
        # of registered subclasses of unregistered models.
        self._registry[model] = opts
    return self._registry[model]
python
{ "resource": "" }
q35241
Translator.get_options_for_model
train
def get_options_for_model(self, model):
    """
    Thin wrapper around ``_get_options_for_model`` that preserves the
    semantic of raising ``NotRegistered`` for models that were never
    registered, directly or through a translated relation.
    """
    opts = self._get_options_for_model(model)
    if opts.registered or opts.related:
        return opts
    raise NotRegistered('The model "%s" is not registered for '
                        'translation' % model.__name__)
python
{ "resource": "" }
q35242
Command.get_table_fields
train
def get_table_fields(self, db_table):
    """
    Return the column names of ``db_table`` as reported by database
    introspection.
    """
    description = self.introspection.get_table_description(self.cursor, db_table)
    return [column[0] for column in description]
python
{ "resource": "" }
q35243
Command.get_missing_languages
train
def get_missing_languages(self, field_name, db_table):
    """
    Yield the language codes whose localized column for ``field_name`` is
    missing from ``db_table``.
    """
    existing_columns = set(self.get_table_fields(db_table))
    for lang_code in AVAILABLE_LANGUAGES:
        if build_localized_fieldname(field_name, lang_code) not in existing_columns:
            yield lang_code
python
{ "resource": "" }
q35244
Command.get_sync_sql
train
def get_sync_sql(self, field_name, missing_langs, model):
    """
    Returns SQL needed for sync schema for a new translatable field.

    One ``ALTER TABLE ... ADD COLUMN`` statement is produced per missing
    language column.
    """
    qn = connection.ops.quote_name
    style = no_style()
    sql_output = []
    db_table = model._meta.db_table
    for lang in missing_langs:
        new_field = build_localized_fieldname(field_name, lang)
        f = model._meta.get_field(new_field)
        col_type = f.db_type(connection=connection)
        field_sql = [style.SQL_FIELD(qn(f.column)), style.SQL_COLTYPE(col_type)]
        # column creation
        stmt = "ALTER TABLE %s ADD COLUMN %s" % (qn(db_table), ' '.join(field_sql))
        # Mirror the field's NULL constraint on the new column.
        if not f.null:
            stmt += " " + style.SQL_KEYWORD('NOT NULL')
        sql_output.append(stmt + ";")
    return sql_output
python
{ "resource": "" }
q35245
create_translation_field
train
def create_translation_field(model, field_name, lang, empty_value):
    """
    Translation field factory. Returns a ``TranslationField`` based on a
    fieldname and a language.

    The list of supported fields can be extended by defining a tuple of field
    names in the project's settings.py like this::

        MODELTRANSLATION_CUSTOM_FIELDS = ('MyField', 'MyOtherField',)

    If the class is neither a subclass of fields in ``SUPPORTED_FIELDS``, nor
    in ``CUSTOM_FIELDS`` an ``ImproperlyConfigured`` exception will be raised.
    """
    # NONE is the module's "not configured" sentinel for empty_value.
    if empty_value not in ('', 'both', None, NONE):
        raise ImproperlyConfigured('%s is not a valid empty_value.' % empty_value)
    field = model._meta.get_field(field_name)
    cls_name = field.__class__.__name__
    if not (isinstance(field, SUPPORTED_FIELDS) or cls_name in mt_settings.CUSTOM_FIELDS):
        raise ImproperlyConfigured(
            '%s is not supported by modeltranslation.' % cls_name)
    # field_factory returns the TranslationField subclass for this field type.
    translation_class = field_factory(field.__class__)
    return translation_class(translated_field=field, language=lang, empty_value=empty_value)
python
{ "resource": "" }
q35246
TranslationFieldDescriptor.meaningful_value
train
def meaningful_value(self, val, undefined):
    """
    Check if val is considered non-empty.

    A FieldFile counts as meaningful only when it has a name and differs
    from the ``undefined`` sentinel; any other value is meaningful unless
    it is None or equals ``undefined``.
    """
    if isinstance(val, fields.files.FieldFile):
        if not val.name:
            # Falsy name ('' or None) short-circuits, as in `a and b`.
            return val.name
        return not (isinstance(undefined, fields.files.FieldFile) and val == undefined)
    return val is not None and val != undefined
python
{ "resource": "" }
q35247
LanguageCacheSingleObjectDescriptor.cache_name
train
def cache_name(self):
    """
    Used in django 1.x

    The cache attribute name is localized with the active language, so each
    language caches its related object in a separate slot.
    """
    localized = build_localized_fieldname(self.accessor, get_language())
    return "_%s_cache" % localized
python
{ "resource": "" }
q35248
TranslationBaseModelAdmin.replace_orig_field
train
def replace_orig_field(self, option):
    """
    Replaces each original field in `option` that is registered for
    translation by its translation fields.

    Returns a new list with replaced fields. If `option` contains no
    registered fields, it is returned unmodified.

    >>> self = TranslationAdmin()  # PyFlakes
    >>> print(self.trans_opts.fields.keys())
    ['title',]
    >>> get_translation_fields(self.trans_opts.fields.keys()[0])
    ['title_de', 'title_en']
    >>> self.replace_orig_field(['title', 'url'])
    ['title_de', 'title_en', 'url']

    Note that grouped fields are flattened. We do this because:

        1. They are hard to handle in the jquery-ui tabs implementation
        2. They don't scale well with more than a few languages
        3. It's better than not handling them at all (okay that's weak)

    >>> self.replace_orig_field((('title', 'url'), 'email', 'text'))
    ['title_de', 'title_en', 'url_de', 'url_en', 'email_de', 'email_en', 'text']
    """
    if option:
        option_new = list(option)
        for opt in option:
            if opt in self.trans_opts.fields:
                # Splice the translation fields in place of the original.
                index = option_new.index(opt)
                option_new[index:index + 1] = get_translation_fields(opt)
            elif isinstance(opt, (tuple, list)) and (
                    [o for o in opt if o in self.trans_opts.fields]):
                # A group containing at least one translated field is
                # recursively replaced and thereby flattened.
                index = option_new.index(opt)
                option_new[index:index + 1] = self.replace_orig_field(opt)
        option = option_new
    return option
python
{ "resource": "" }
q35249
TranslationBaseModelAdmin._get_form_or_formset
train
def _get_form_or_formset(self, request, obj, **kwargs):
    """
    Generic code shared by get_form and get_formset.

    Builds the effective ``exclude`` list (admin excludes, read-only fields
    and, when applicable, the form's own Meta.exclude), rewrites it to
    translation fields and stores it in ``kwargs``.
    """
    if self.exclude is None:
        exclude = []
    else:
        exclude = list(self.exclude)
    exclude.extend(self.get_readonly_fields(request, obj))
    if not self.exclude and hasattr(self.form, '_meta') and self.form._meta.exclude:
        # Take the custom ModelForm's Meta.exclude into account only if the
        # ModelAdmin doesn't define its own.
        exclude.extend(self.form._meta.exclude)
    # If exclude is an empty list we pass None to be consistent with the
    # default on modelform_factory
    exclude = self.replace_orig_field(exclude) or None
    exclude = self._exclude_original_fields(exclude)
    kwargs.update({'exclude': exclude})
    return kwargs
python
{ "resource": "" }
q35250
TranslationBaseModelAdmin._get_fieldsets_post_form_or_formset
train
def _get_fieldsets_post_form_or_formset(self, request, form, obj=None):
    """
    Generic get_fieldsets code, shared by
    TranslationAdmin and TranslationInlineModelAdmin.
    """
    # Replace original fields with their translation fields, then append
    # read-only fields (which never appear in form.base_fields).
    base_fields = self.replace_orig_field(form.base_fields.keys())
    fields = base_fields + list(self.get_readonly_fields(request, obj))
    # NOTE(review): replace_orig_field runs twice; the second pass can only
    # affect the appended read-only entries -- presumably intentional, confirm.
    return [(None, {'fields': self.replace_orig_field(fields)})]
python
{ "resource": "" }
q35251
ClearableWidgetWrapper.media
train
def media(self):
    """
    Combines media of both components and adds a small script that
    unchecks the clear box, when a value in any wrapped input is modified.
    """
    # Media(self.Media) builds a Media object from this wrapper's own inner
    # ``Media`` declaration (the uncheck-on-edit script).
    return self.widget.media + self.checkbox.media + Media(self.Media)
python
{ "resource": "" }
q35252
ClearableWidgetWrapper.value_from_datadict
train
def value_from_datadict(self, data, files, name):
    """
    Extract this widget's value from the submitted form data.

    When the companion clear checkbox was checked, the configured empty
    value wins and the wrapped input is ignored entirely; otherwise the
    wrapped widget parses the data as usual.
    """
    checkbox_name = self.clear_checkbox_name(name)
    if self.checkbox.value_from_datadict(data, files, checkbox_name):
        return self.empty_value
    return self.widget.value_from_datadict(data, files, name)
python
{ "resource": "" }
q35253
setup_aiohttp_apispec
train
def setup_aiohttp_apispec(
    app: web.Application,
    *,
    title: str = "API documentation",
    version: str = "0.0.1",
    url: str = "/api/docs/swagger.json",
    request_data_name: str = "data",
    swagger_path: str = None,
    static_path: str = '/static/swagger',
    **kwargs
) -> None:
    """
    aiohttp-apispec extension.

    Usage:

    .. code-block:: python

        from aiohttp_apispec import docs, request_schema, setup_aiohttp_apispec
        from aiohttp import web
        from marshmallow import Schema, fields


        class RequestSchema(Schema):
            id = fields.Int()
            name = fields.Str(description='name')
            bool_field = fields.Bool()


        @docs(tags=['mytag'],
              summary='Test method summary',
              description='Test method description')
        @request_schema(RequestSchema)
        async def index(request):
            return web.json_response({'msg': 'done', 'data': {}})


        app = web.Application()
        app.router.add_post('/v1/test', index)

        # init docs with all parameters, usual for ApiSpec
        setup_aiohttp_apispec(app=app,
                              title='My Documentation',
                              version='v1',
                              url='/api/docs/api-docs')

        # now we can find it on 'http://localhost:8080/api/docs/api-docs'
        web.run_app(app)

    :param Application app: aiohttp web app
    :param str title: API title
    :param str version: API version
    :param str url: url for swagger spec in JSON format
    :param str request_data_name: name of the key in Request object
                                  where validated data will be placed by
                                  validation_middleware (``'data'`` by default)
    :param str swagger_path: experimental SwaggerUI support (starting from v1.1.0).
                             By default it is None (disabled)
    :param str static_path: path for static files used by SwaggerUI
                            (if it is enabled with ``swagger_path``)
    :param kwargs: any apispec.APISpec kwargs
    """
    # Constructing AiohttpApiSpec registers routes/middleware on `app` as a
    # side effect; the instance itself does not need to be retained.
    AiohttpApiSpec(
        url,
        app,
        request_data_name,
        title=title,
        version=version,
        swagger_path=swagger_path,
        static_path=static_path,
        **kwargs
    )
python
{ "resource": "" }
q35254
docs
train
def docs(**kwargs):
    """
    Annotate the decorated view function with the specified Swagger attributes.

    Keyword arguments are merged into the handler's ``__apispec__`` dict:
    ``parameters`` entries are appended, ``responses`` entries are merged,
    and every other key overwrites any previous value.  ``produces`` is
    always forced to ``["application/json"]``.

    Usage:

    .. code-block:: python

        @docs(tags=['my_tag'], summary='Test method summary')
        async def index(request):
            return web.json_response({'msg': 'done', 'data': {}})
    """
    def wrapper(func):
        kwargs["produces"] = ["application/json"]
        if not hasattr(func, "__apispec__"):
            func.__apispec__ = {"parameters": [], "responses": {}}
        spec = func.__apispec__
        spec["parameters"].extend(kwargs.pop("parameters", []))
        spec["responses"].update(kwargs.pop("responses", {}))
        spec.update(kwargs)
        return func

    return wrapper
python
{ "resource": "" }
q35255
response_schema
train
def response_schema(schema, code=200, required=False, description=None):
    """
    Add response info into the swagger spec.

    Usage:

    .. code-block:: python

        @response_schema(ResponseSchema(), 200)
        async def index(request):
            return web.json_response({'msg': 'done', 'data': {}})

    :param schema: :class:`Schema <marshmallow.Schema>` class or instance
        (a class is instantiated automatically)
    :param int code: HTTP response code the entry documents
    :param bool required:
    :param str description: response description
    """
    if callable(schema):
        schema = schema()

    def wrapper(func):
        if not hasattr(func, "__apispec__"):
            func.__apispec__ = {"parameters": [], "responses": {}}
        # A fresh dict per decorated handler -- entries must not be shared.
        func.__apispec__["responses"][str(code)] = dict(
            schema=schema,
            required=required,
            description=description,
        )
        return func

    return wrapper
python
{ "resource": "" }
q35256
validation_middleware
train
async def validation_middleware(request: web.Request, handler) -> web.Response:
    """Parse and validate request data before calling the handler.

    Validation middleware for aiohttp web app.

    Usage:

    .. code-block:: python

        app.middlewares.append(validation_middleware)

    """
    orig_handler = request.match_info.handler
    if not hasattr(orig_handler, "__schemas__"):
        # No schemas on the handler itself. For class-based views the
        # schemas live on the method handling the current HTTP verb.
        if not issubclass_py37fix(orig_handler, web.View):
            return await handler(request)
        sub_handler = getattr(orig_handler, request.method.lower(), None)
        if sub_handler is None:
            return await handler(request)
        if not hasattr(sub_handler, "__schemas__"):
            return await handler(request)
        schemas = sub_handler.__schemas__
    else:
        schemas = orig_handler.__schemas__
    kwargs = {}
    for schema in schemas:
        # Each entry holds a marshmallow schema plus the request
        # locations (query, json, headers, ...) it is parsed from.
        data = await parser.parse(
            schema["schema"], request, locations=schema["locations"]
        )
        if data:
            kwargs.update(data)
    # Path parameters take precedence over parsed body/query data.
    kwargs.update(request.match_info)
    request[request.app["_apispec_request_data_name"]] = kwargs
    return await handler(request)
python
{ "resource": "" }
q35257
is_valid_input_array
train
def is_valid_input_array(x, ndim=None):
    """Check whether ``x`` is a correctly shaped point array in R^d.

    For ``ndim is None`` or ``ndim == 1``, a valid array is either
    one-dimensional with more than one entry, or two-dimensional with a
    single row. Otherwise it must be a 2d array with exactly ``ndim`` rows.
    """
    arr = np.asarray(x)
    if ndim is None or ndim == 1:
        if arr.ndim == 1:
            # NOTE: a single 1d point (size 1) is deliberately rejected.
            return arr.size > 1
        return arr.ndim == 2 and arr.shape[0] == 1
    return arr.ndim == 2 and arr.shape[0] == ndim
python
{ "resource": "" }
q35258
is_valid_input_meshgrid
train
def is_valid_input_meshgrid(x, ndim):
    """Check whether ``x`` is a `meshgrid` sequence for points in R^d.

    A valid meshgrid is a tuple of ``ndim`` numpy arrays, each with
    ``ndim`` axes, that can be broadcast against each other.
    """
    # This case is triggered in FunctionSpaceElement.__call__ if the
    # domain does not have an 'ndim' attribute; treat as "not a meshgrid".
    if ndim is None:
        return False
    if not isinstance(x, tuple):
        return False
    if ndim > 1:
        try:
            np.broadcast(*x)
        except (ValueError, TypeError):
            # Arrays cannot be broadcast together
            return False
    if len(x) != ndim:
        return False
    return all(isinstance(xi, np.ndarray) and xi.ndim == ndim for xi in x)
python
{ "resource": "" }
q35259
out_shape_from_meshgrid
train
def out_shape_from_meshgrid(mesh):
    """Return the broadcast output shape of a `meshgrid` sequence."""
    if len(mesh) == 1:
        # Single-axis grid: no broadcasting needed.
        (axis_vec,) = mesh
        return (len(axis_vec),)
    return np.broadcast(*mesh).shape
python
{ "resource": "" }
q35260
out_shape_from_array
train
def out_shape_from_array(arr):
    """Return the output shape implied by a point array.

    A 1d array maps to its own shape; a 2d array of stacked coordinate
    rows maps to ``(number of points,)``.
    """
    arr = np.asarray(arr)
    return arr.shape if arr.ndim == 1 else (arr.shape[1],)
python
{ "resource": "" }
q35261
vectorize._wrapper
train
def _wrapper(func, *vect_args, **vect_kwargs):
    """Return the vectorized wrapper function.

    Wraps ``func`` in a ``_NumpyVectorizeWrapper`` (forwarding any extra
    vectorization arguments) while preserving the original's metadata
    via ``functools.wraps``.
    """
    if not hasattr(func, '__name__'):
        # Give the callable a ``__name__`` so that ``wraps`` can copy it.
        # This branch is taken when ``func`` is a callable object rather
        # than a plain function (plain functions always have ``__name__``).
        func.__name__ = '{}.__call__'.format(func.__class__.__name__)

    return wraps(func)(_NumpyVectorizeWrapper(func, *vect_args, **vect_kwargs))
python
{ "resource": "" }
q35262
ProductSpaceOperator._convert_to_spmatrix
train
def _convert_to_spmatrix(operators): """Convert an array-like object of operators to a sparse matrix.""" # Lazy import to improve `import odl` time import scipy.sparse # Convert ops to sparse representation. This is not trivial because # operators can be indexable themselves and give the wrong impression # of an extra dimension. So we have to infer the shape manually # first and extract the indices of nonzero positions. nrows = len(operators) ncols = None irow, icol, data = [], [], [] for i, row in enumerate(operators): try: iter(row) except TypeError: raise ValueError( '`operators` must be a matrix of `Operator` objects, `0` ' 'or `None`, got {!r} (row {} = {!r} is not iterable)' ''.format(operators, i, row)) if isinstance(row, Operator): raise ValueError( '`operators` must be a matrix of `Operator` objects, `0` ' 'or `None`, but row {} is an `Operator` {!r}' ''.format(i, row)) if ncols is None: ncols = len(row) elif len(row) != ncols: raise ValueError( 'all rows in `operators` must have the same length, but ' 'length {} of row {} differs from previous common length ' '{}'.format(len(row), i, ncols)) for j, col in enumerate(row): if col is None or col is 0: pass elif isinstance(col, Operator): irow.append(i) icol.append(j) data.append(col) else: raise ValueError( '`operators` must be a matrix of `Operator` objects, ' '`0` or `None`, got entry {!r} at ({}, {})' ''.format(col, i, j)) # Create object array explicitly, threby avoiding erroneous conversion # in `coo_matrix.__init__` data_arr = np.empty(len(data), dtype=object) data_arr[:] = data return scipy.sparse.coo_matrix((data_arr, (irow, icol)), shape=(nrows, ncols))
python
{ "resource": "" }
q35263
ProductSpaceOperator._call
train
def _call(self, x, out=None): """Call the operators on the parts of ``x``.""" # TODO: add optimization in case an operator appears repeatedly in a # row if out is None: out = self.range.zero() for i, j, op in zip(self.ops.row, self.ops.col, self.ops.data): out[i] += op(x[j]) else: has_evaluated_row = np.zeros(len(self.range), dtype=bool) for i, j, op in zip(self.ops.row, self.ops.col, self.ops.data): if not has_evaluated_row[i]: op(x[j], out=out[i]) else: # TODO: optimize out[i] += op(x[j]) has_evaluated_row[i] = True for i, evaluated in enumerate(has_evaluated_row): if not evaluated: out[i].set_zero() return out
python
{ "resource": "" }
q35264
ProductSpaceOperator.derivative
train
def derivative(self, x): """Derivative of the product space operator. Parameters ---------- x : `domain` element The point to take the derivative in Returns ------- adjoint : linear`ProductSpaceOperator` The derivative Examples -------- >>> r3 = odl.rn(3) >>> pspace = odl.ProductSpace(r3, r3) >>> I = odl.IdentityOperator(r3) >>> x = pspace.element([[1, 2, 3], [4, 5, 6]]) Example with linear operator (derivative is itself) >>> prod_op = ProductSpaceOperator([[0, I], [0, 0]], ... domain=pspace, range=pspace) >>> prod_op(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ]) >>> prod_op.derivative(x)(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ]) Example with affine operator >>> residual_op = I - r3.element([1, 1, 1]) >>> op = ProductSpaceOperator([[0, residual_op], [0, 0]], ... domain=pspace, range=pspace) Calling operator gives offset by [1, 1, 1] >>> op(x) ProductSpace(rn(3), 2).element([ [ 3., 4., 5.], [ 0., 0., 0.] ]) Derivative of affine operator does not have this offset >>> op.derivative(x)(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ]) """ # Lazy import to improve `import odl` time import scipy.sparse # Short circuit optimization if self.is_linear: return self deriv_ops = [op.derivative(x[col]) for op, col in zip(self.ops.data, self.ops.col)] data = np.empty(len(deriv_ops), dtype=object) data[:] = deriv_ops indices = [self.ops.row, self.ops.col] shape = self.ops.shape deriv_matrix = scipy.sparse.coo_matrix((data, indices), shape) return ProductSpaceOperator(deriv_matrix, self.domain, self.range)
python
{ "resource": "" }
q35265
ComponentProjection._call
train
def _call(self, x, out=None): """Project ``x`` onto the subspace.""" if out is None: out = x[self.index].copy() else: out.assign(x[self.index]) return out
python
{ "resource": "" }
q35266
ComponentProjectionAdjoint._call
train
def _call(self, x, out=None): """Extend ``x`` from the subspace.""" if out is None: out = self.range.zero() else: out.set_zero() out[self.index] = x return out
python
{ "resource": "" }
q35267
BroadcastOperator._call
train
def _call(self, x, out=None): """Evaluate all operators in ``x`` and broadcast.""" wrapped_x = self.prod_op.domain.element([x], cast=False) return self.prod_op(wrapped_x, out=out)
python
{ "resource": "" }
q35268
BroadcastOperator.derivative
train
def derivative(self, x):
    """Derivative of the broadcast operator.

    Parameters
    ----------
    x : `domain` element
        The point to take the derivative in

    Returns
    -------
    adjoint : linear `BroadcastOperator`
        The derivative

    Examples
    --------
    Example with an affine operator:

    >>> I = odl.IdentityOperator(odl.rn(3))
    >>> residual_op = I - I.domain.element([1, 1, 1])
    >>> op = BroadcastOperator(residual_op, 2 * residual_op)

    Calling operator offsets by ``[1, 1, 1]``:

    >>> x = [1, 2, 3]
    >>> op(x)
    ProductSpace(rn(3), 2).element([
        [ 0.,  1.,  2.],
        [ 0.,  2.,  4.]
    ])

    The derivative of this affine operator does not have an offset:

    >>> op.derivative(x)(x)
    ProductSpace(rn(3), 2).element([
        [ 1.,  2.,  3.],
        [ 2.,  4.,  6.]
    ])
    """
    # Differentiate each sub-operator at the same point ``x``.
    derivs = [op.derivative(x) for op in self.operators]
    return BroadcastOperator(*derivs)
python
{ "resource": "" }
q35269
ReductionOperator._call
train
def _call(self, x, out=None): """Apply operators to ``x`` and sum.""" if out is None: return self.prod_op(x)[0] else: wrapped_out = self.prod_op.range.element([out], cast=False) pspace_result = self.prod_op(x, out=wrapped_out) return pspace_result[0]
python
{ "resource": "" }
q35270
ReductionOperator.derivative
train
def derivative(self, x):
    """Derivative of the reduction operator.

    Parameters
    ----------
    x : `domain` element
        The point to take the derivative in.

    Returns
    -------
    derivative : linear `BroadcastOperator`

    Examples
    --------
    >>> r3 = odl.rn(3)
    >>> I = odl.IdentityOperator(r3)
    >>> x = [1.0, 2.0, 3.0]
    >>> y = [4.0, 6.0, 8.0]

    Example with linear operator (derivative is itself)

    >>> op = ReductionOperator(I, 2 * I)
    >>> op([x, y])
    rn(3).element([  9.,  14.,  19.])
    >>> op.derivative([x, y])([x, y])
    rn(3).element([  9.,  14.,  19.])

    Example with affine operator

    >>> residual_op = I - r3.element([1, 1, 1])
    >>> op = ReductionOperator(residual_op, 2 * residual_op)

    Calling operator gives offset by [3, 3, 3]

    >>> op([x, y])
    rn(3).element([  6.,  11.,  16.])

    Derivative of affine operator does not have this offset

    >>> op.derivative([x, y])([x, y])
    rn(3).element([  9.,  14.,  19.])
    """
    # Each sub-operator is differentiated at its own component of ``x``.
    derivs = [op.derivative(xi) for op, xi in zip(self.operators, x)]
    return ReductionOperator(*derivs)
python
{ "resource": "" }
q35271
load_julia_with_Shearlab
train
def load_julia_with_Shearlab():
    """Start a Julia session and load the Shearlab toolchain.

    Returns the ``julia.Julia`` bridge object with the ``Shearlab``,
    ``PyPlot`` and ``Images`` packages imported.
    """
    jl = julia.Julia()
    for package in ('Shearlab', 'PyPlot', 'Images'):
        jl.eval('using ' + package)
    return jl
python
{ "resource": "" }
q35272
load_image
train
def load_image(name, n, m=None, gpu=None, square=None):
    """Load an image via Shearlab with the given target size.

    ``m`` defaults to ``n`` (square output); ``gpu`` and ``square``
    default to 0 (disabled) as on the Julia side.
    """
    if m is None:
        m = n
    gpu = 0 if gpu is None else gpu
    square = 0 if square is None else square
    command = 'Shearlab.load_image("{}", {}, {}, {}, {})'.format(
        name, n, m, gpu, square)
    return j.eval(command)
python
{ "resource": "" }
q35273
imageplot
train
def imageplot(f, str=None, sbpt=None):
    """Display ``f`` as a grayscale image, optionally titled and placed
    in a subplot.

    NOTE: the second parameter is unfortunately named ``str`` (it shadows
    the builtin); the name is kept for backward compatibility with
    callers passing it as a keyword.
    """
    title = '' if str is None else str
    subplot_spec = [] if sbpt is None else sbpt
    if subplot_spec != []:
        plt.subplot(subplot_spec[0], subplot_spec[1], subplot_spec[2])
    image = plt.imshow(f, interpolation='nearest')
    image.set_cmap('gray')
    plt.axis('off')
    if title != '':
        plt.title(title)
python
{ "resource": "" }
q35274
getshearletsystem2D
train
def getshearletsystem2D(rows, cols, nScales,
                        shearLevels=None, full=None,
                        directionalFilter=None,
                        quadratureMirrorFilter=None):
    """Generate a 2D shearlet system via the Julia Shearlab backend.

    The parameters are pushed into the global Julia session ``j`` as
    variables, the system is built there, its fields are pulled back
    into Python, and the Julia-side object is released.
    """
    # Defaults mirror the Shearlab.jl conventions; the filters are given
    # as Julia expressions evaluated in the session.
    if shearLevels is None:
        shearLevels = [float(ceil(i / 2)) for i in range(1, nScales + 1)]
    if full is None:
        full = 0
    if directionalFilter is None:
        directionalFilter = 'Shearlab.filt_gen("directional_shearlet")'
    if quadratureMirrorFilter is None:
        quadratureMirrorFilter = 'Shearlab.filt_gen("scaling_shearlet")'

    # Transfer the parameters as global variables of the Julia session.
    j.eval('rows=' + str(rows))
    j.eval('cols=' + str(cols))
    j.eval('nScales=' + str(nScales))
    j.eval('shearLevels=' + str(shearLevels))
    j.eval('full=' + str(full))
    j.eval('directionalFilter=' + directionalFilter)
    j.eval('quadratureMirrorFilter=' + quadratureMirrorFilter)
    j.eval('shearletsystem=Shearlab.getshearletsystem2D(rows, '
           'cols, nScales, shearLevels, full, directionalFilter, '
           'quadratureMirrorFilter) ')

    # Pull each field of the Julia object into Python.
    shearlets = j.eval('shearletsystem.shearlets')
    size = j.eval('shearletsystem.size')
    shearLevels = j.eval('shearletsystem.shearLevels')
    full = j.eval('shearletsystem.full')
    nShearlets = j.eval('shearletsystem.nShearlets')
    shearletIdxs = j.eval('shearletsystem.shearletIdxs')
    dualFrameWeights = j.eval('shearletsystem.dualFrameWeights')
    RMS = j.eval('shearletsystem.RMS')
    isComplex = j.eval('shearletsystem.isComplex')
    # Free the Julia-side object (can be large).
    j.eval('shearletsystem = 0')

    return Shearletsystem2D(shearlets, size, shearLevels, full,
                            nShearlets, shearletIdxs, dualFrameWeights,
                            RMS, isComplex)
python
{ "resource": "" }
q35275
sheardec2D
train
def sheardec2D(X, shearletsystem): """Shearlet Decomposition function.""" coeffs = np.zeros(shearletsystem.shearlets.shape, dtype=complex) Xfreq = fftshift(fft2(ifftshift(X))) for i in range(shearletsystem.nShearlets): coeffs[:, :, i] = fftshift(ifft2(ifftshift(Xfreq * np.conj( shearletsystem.shearlets[:, :, i])))) return coeffs.real
python
{ "resource": "" }
q35276
ShearlabOperator.adjoint
train
def adjoint(self):
    """The adjoint operator.

    Built lazily as an inner class closing over the original operator
    ``op``; the mutex is shared so that calls into the single Julia
    session are serialized.
    """
    op = self

    class ShearlabOperatorAdjoint(odl.Operator):
        """Adjoint of the shearlet transform.

        See Also
        --------
        odl.contrib.shearlab.ShearlabOperator
        """

        def __init__(self):
            """Initialize a new instance."""
            # Share mutex and system with the forward operator.
            self.mutex = op.mutex
            self.shearlet_system = op.shearlet_system
            super(ShearlabOperatorAdjoint, self).__init__(
                op.range, op.domain, True)

        def _call(self, x):
            """``self(x)``."""
            with op.mutex:
                # Coefficient axis is first in ODL, last in shearlab.
                x = np.moveaxis(x, 0, -1)
                return sheardecadjoint2D(x, op.shearlet_system)

        @property
        def adjoint(self):
            """The adjoint operator."""
            return op

        @property
        def inverse(self):
            """The inverse operator."""
            # NOTE: this deliberately rebinds ``op`` to the adjoint
            # instance, shadowing the enclosing ``op`` for the inner
            # class below.
            op = self

            class ShearlabOperatorAdjointInverse(odl.Operator):
                """
                Adjoint of the inverse/Inverse of the adjoint of
                shearlet transform.

                See Also
                --------
                odl.contrib.shearlab.ShearlabOperator
                """

                def __init__(self):
                    """Initialize a new instance."""
                    self.mutex = op.mutex
                    self.shearlet_system = op.shearlet_system
                    super(ShearlabOperatorAdjointInverse, self).__init__(
                        op.range, op.domain, True)

                def _call(self, x):
                    """``self(x)``."""
                    with op.mutex:
                        result = shearrecadjoint2D(x, op.shearlet_system)
                        # Move coefficient axis back to the front.
                        return np.moveaxis(result, -1, 0)

                @property
                def adjoint(self):
                    """The adjoint operator."""
                    return op.adjoint.inverse

                @property
                def inverse(self):
                    """The inverse operator."""
                    return op

            return ShearlabOperatorAdjointInverse()

    return ShearlabOperatorAdjoint()
python
{ "resource": "" }
q35277
ShearlabOperator.inverse
train
def inverse(self):
    """The inverse operator.

    Built lazily as an inner class closing over the original operator
    ``op``; the mutex is shared so that calls into the single Julia
    session are serialized.
    """
    op = self

    class ShearlabOperatorInverse(odl.Operator):
        """Inverse of the shearlet transform.

        See Also
        --------
        odl.contrib.shearlab.ShearlabOperator
        """

        def __init__(self):
            """Initialize a new instance."""
            # Share mutex and system with the forward operator.
            self.mutex = op.mutex
            self.shearlet_system = op.shearlet_system
            super(ShearlabOperatorInverse, self).__init__(
                op.range, op.domain, True)

        def _call(self, x):
            """``self(x)``."""
            with op.mutex:
                # Coefficient axis is first in ODL, last in shearlab.
                x = np.moveaxis(x, 0, -1)
                return shearrec2D(x, op.shearlet_system)

        @property
        def adjoint(self):
            """The adjoint operator."""
            # NOTE: this deliberately rebinds ``op`` to the inverse
            # instance, shadowing the enclosing ``op`` for the inner
            # class below.
            op = self

            class ShearlabOperatorInverseAdjoint(odl.Operator):
                """
                Adjoint of the inverse/Inverse of the adjoint of
                shearlet transform.

                See Also
                --------
                odl.contrib.shearlab.ShearlabOperator
                """

                def __init__(self):
                    """Initialize a new instance."""
                    self.mutex = op.mutex
                    self.shearlet_system = op.shearlet_system
                    super(ShearlabOperatorInverseAdjoint, self).__init__(
                        op.range, op.domain, True)

                def _call(self, x):
                    """``self(x)``."""
                    with op.mutex:
                        result = shearrecadjoint2D(x, op.shearlet_system)
                        # Move coefficient axis back to the front.
                        return np.moveaxis(result, -1, 0)

                @property
                def adjoint(self):
                    """The adjoint operator."""
                    return op

                @property
                def inverse(self):
                    """The inverse operator."""
                    return op.inverse.adjoint

            return ShearlabOperatorInverseAdjoint()

        @property
        def inverse(self):
            """The inverse operator."""
            return op

    return ShearlabOperatorInverse()
python
{ "resource": "" }
q35278
submarine
train
def submarine(space, smooth=True, taper=20.0):
    """Return a 'submarine' phantom consisting in an ellipsoid and a box.

    Parameters
    ----------
    space : `DiscreteLp`
        Discretized space in which the phantom is supposed to be created.
    smooth : bool, optional
        If ``True``, the boundaries are smoothed out. Otherwise, the
        function steps from 0 to 1 at the boundaries.
    taper : float, optional
        Tapering parameter for the boundary smoothing. Larger values
        mean faster taper, i.e. sharper boundaries.

    Returns
    -------
    phantom : ``space`` element
        The submarine phantom in ``space``.
    """
    # Only the 2d variant is implemented; reject everything else early.
    if space.ndim != 2:
        raise ValueError('phantom only defined in 2 dimensions, got {}'
                         ''.format(space.ndim))
    if smooth:
        return _submarine_2d_smooth(space, taper)
    return _submarine_2d_nonsmooth(space)
python
{ "resource": "" }
q35279
_submarine_2d_smooth
train
def _submarine_2d_smooth(space, taper):
    """Return a 2d smooth 'submarine' phantom."""

    def logistic(t, c):
        """Smoothed step function from 0 to 1, centered at 0."""
        return 1. / (1 + np.exp(-c * t))

    def blurred_ellipse(x):
        """Blurred characteristic function of an ellipse.

        On the unit square the ellipse is centered at ``(0.6, 0.3)`` and
        has half-axes ``(0.4, 0.14)``; other domains are scaled
        accordingly.
        """
        halfaxes = np.array([0.4, 0.14]) * space.domain.extent
        center = np.array([0.6, 0.3]) * space.domain.extent
        center += space.domain.min()

        # Efficiently calculate |z|^2, z = (x - center) / radii
        sq_ndist = np.zeros_like(x[0])
        for xi, rad, cen in zip(x, halfaxes, center):
            sq_ndist = sq_ndist + ((xi - cen) / rad) ** 2

        ndist = np.sqrt(sq_ndist)
        ndist -= 1
        # Equals logistic(taper * (1 - |z|))
        return logistic(ndist, -taper)

    def blurred_rect(x):
        """Blurred characteristic function of a rectangle.

        On the unit square the rectangle has lower left ``(0.56, 0.4)``
        and upper right ``(0.76, 0.6)``; other domains are scaled
        accordingly.
        """
        xlower = np.array([0.56, 0.4]) * space.domain.extent
        xlower += space.domain.min()
        xupper = np.array([0.76, 0.6]) * space.domain.extent
        xupper += space.domain.min()

        rect = np.ones_like(x[0])
        for xi, low, upp in zip(x, xlower, xupper):
            length = upp - low
            rect = rect * (logistic((xi - low) / length, taper) *
                           logistic((upp - xi) / length, taper))
        return rect

    # Superimpose both shapes and clip the overlap region at 1.
    phantom = space.element(blurred_ellipse)
    phantom += space.element(blurred_rect)
    return phantom.ufuncs.minimum(1, out=phantom)
python
{ "resource": "" }
q35280
_submarine_2d_nonsmooth
train
def _submarine_2d_nonsmooth(space):
    """Return a 2d nonsmooth 'submarine' phantom."""

    def ellipse(x):
        """Characteristic function of an ellipse.

        On the unit square the ellipse is centered at ``(0.6, 0.3)`` and
        has half-axes ``(0.4, 0.14)``; other domains are scaled
        accordingly.
        """
        halfaxes = np.array([0.4, 0.14]) * space.domain.extent
        center = np.array([0.6, 0.3]) * space.domain.extent
        center += space.domain.min()

        # |z|^2 with z = (x - center) / radii; inside iff |z| <= 1.
        sq_ndist = np.zeros_like(x[0])
        for xi, rad, cen in zip(x, halfaxes, center):
            sq_ndist = sq_ndist + ((xi - cen) / rad) ** 2

        return np.where(sq_ndist <= 1, 1, 0)

    def rect(x):
        """Characteristic function of a rectangle.

        On the unit square the rectangle has lower left ``(0.56, 0.4)``
        and upper right ``(0.76, 0.6)``; other domains are scaled
        accordingly.
        """
        xlower = np.array([0.56, 0.4]) * space.domain.extent
        xlower += space.domain.min()
        xupper = np.array([0.76, 0.6]) * space.domain.extent
        xupper += space.domain.min()

        inside = np.ones_like(x[0])
        for xi, low, upp in zip(x, xlower, xupper):
            inside = inside * ((xi >= low) & (xi <= upp))
        return inside

    # Superimpose both shapes and clip the overlap region at 1.
    phantom = space.element(ellipse)
    phantom += space.element(rect)
    return phantom.ufuncs.minimum(1, out=phantom)
python
{ "resource": "" }
q35281
text
train
def text(space, text, font=None, border=0.2, inverted=True):
    """Create a phantom that renders ``text`` as a scalar image in [0, 1].

    Depending on the chosen font, the rendering may be anti-aliased
    (values anywhere in [0, 1]) or binary. Requires the ``pillow``
    package.

    Parameters
    ----------
    space : `DiscreteLp`
        Two-dimensional discretized space in which the phantom is
        created.
    text : str
        The text to write onto the background.
    font : str, optional
        Font used to render the text; the available set is highly
        platform dependent. Defaults to ``'arial'`` on Windows,
        ``'Helvetica'`` on OSX and ``'LiberationSans-Regular'``
        elsewhere.
    border : float, optional
        Padding around the text: 0.0 lets the text occupy all of the
        space along its largest dimension, 1.0 pads maximally (text not
        visible).
    inverted : bool, optional
        If ``True``, produce inverted style, i.e. white text on black.

    Returns
    -------
    phantom : ``space`` element
        The text phantom in ``space``.

    Notes
    -----
    There is no obvious way (except trial and error) to find which fonts
    a platform supports. On Windows ``'arial'``, ``'calibri'`` and
    ``'impact'`` tend to be available; on Linux try::

        $ find /usr/share/fonts -name "*.[to]tf"
    """
    from PIL import Image, ImageDraw, ImageFont

    if space.ndim != 2:
        raise ValueError('`space` must be two-dimensional')

    if font is None:
        platform = sys.platform
        if platform == 'win32':  # Windows
            font = 'arial'
        elif platform == 'darwin':  # Mac OSX
            font = 'Helvetica'
        else:  # Assume linux
            font = 'LiberationSans-Regular'

    text = str(text)

    # Determine the proper font size: render once at a large reference
    # size and measure how much the text must be scaled to fit.
    init_size = 1000
    probe_font = ImageFont.truetype(font + ".ttf", size=init_size,
                                    encoding="unic")
    probe_width, probe_height = probe_font.getsize(text)

    # Shrink the reference size by the requested border fraction.
    scaled_init_size = (1.0 - border) * init_size
    size = int(scaled_init_size * min(space.shape[0] / probe_width,
                                      space.shape[1] / probe_height))

    pil_font = ImageFont.truetype(font + ".ttf", size=size, encoding="unic")
    text_width, text_height = pil_font.getsize(text)

    # White canvas matching the space shape.
    canvas = Image.new('RGB', space.shape, (255, 255, 255))

    # Draw the text centered on the canvas; fill color is black.
    draw = ImageDraw.Draw(canvas)
    offset = ((space.shape[0] - text_width) // 2,
              (space.shape[1] - text_height) // 2)
    draw.text(offset, text, font=pil_font, fill="#000000")

    # Convert the canvas into a [0, 1]-valued array oriented for `space`.
    arr = np.asarray(canvas)
    arr = np.sum(arr, -1)
    arr = arr / np.max(arr)
    arr = np.rot90(arr, -1)

    if inverted:
        arr = 1 - arr

    return space.element(arr)
python
{ "resource": "" }
q35282
Weighting.norm
train
def norm(self, x):
    """Calculate the norm of an element.

    Standard implementation via `inner`; subclasses should override it
    for optimization purposes.

    Parameters
    ----------
    x1 : `LinearSpaceElement`
        Element whose norm is calculated.

    Returns
    -------
    norm : float
        The norm of the element.
    """
    # The real part guards against tiny imaginary round-off in complex
    # inner products.
    inner_xx = self.inner(x, x)
    return float(np.sqrt(inner_xx.real))
python
{ "resource": "" }
q35283
MatrixWeighting.is_valid
train
def is_valid(self):
    """Test if the matrix is positive definite Hermitian.

    If cached eigenvalues are available, positivity of all of them is
    checked. Otherwise a Cholesky factorization is attempted, which can
    be very time-consuming for large matrices. Sparse matrices are not
    supported.
    """
    # Lazy import to improve `import odl` time
    import scipy.sparse

    if scipy.sparse.isspmatrix(self.matrix):
        raise NotImplementedError('validation not supported for sparse '
                                  'matrices')

    if self._eigval is not None:
        # Positive definite iff every cached eigenvalue is > 0.
        return np.all(np.greater(self._eigval, 0))

    # No decomposition cached: Cholesky succeeds only for positive
    # definite matrices; Hermitian-ness is checked separately.
    try:
        np.linalg.cholesky(self.matrix)
    except np.linalg.LinAlgError:
        return False
    return np.array_equal(self.matrix, self.matrix.conj().T)
python
{ "resource": "" }
q35284
MatrixWeighting.matrix_decomp
train
def matrix_decomp(self, cache=None):
    """Compute a Hermitian eigenbasis decomposition of the matrix.

    Parameters
    ----------
    cache : bool or None, optional
        If ``True``, store the decomposition internally. For None,
        the ``cache_mat_decomp`` from class initialization is used.

    Returns
    -------
    eigval : `numpy.ndarray`
        One-dimensional array of eigenvalues. Its length is equal
        to the number of matrix rows.
    eigvec : `numpy.ndarray`
        Two-dimensional array of eigenvectors. It has the same shape
        as the decomposed matrix.

    See Also
    --------
    scipy.linalg.eigh : Implementation of the decomposition.
        Standard parameters are used here.

    Raises
    ------
    NotImplementedError
        if the matrix is sparse (not supported by scipy 0.17)
    """
    # Lazy imports to improve `import odl` time
    import scipy.linalg
    import scipy.sparse

    if scipy.sparse.isspmatrix(self.matrix):
        raise NotImplementedError('sparse matrix not supported')

    if cache is None:
        cache = self._cache_mat_decomp

    # Reuse the cached decomposition when both parts are present.
    if self._eigval is not None and self._eigvec is not None:
        return self._eigval, self._eigvec

    eigval, eigvec = scipy.linalg.eigh(self.matrix)
    if cache:
        self._eigval = eigval
        self._eigvec = eigvec
    return eigval, eigvec
python
{ "resource": "" }
q35285
ArrayWeighting.equiv
train
def equiv(self, other):
    """Return True if other is an equivalent weighting.

    Returns
    -------
    equivalent : bool
        ``True`` if ``other`` is a `Weighting` instance with the same
        `Weighting.impl`, which yields the same result as this weighting
        for any input, ``False`` otherwise. This is checked by entry-wise
        comparison of arrays/constants.
    """
    # Fast path: equal weightings are trivially equivalent.
    if self == other:
        return True
    if not isinstance(other, Weighting) or self.exponent != other.exponent:
        return False
    if isinstance(other, MatrixWeighting):
        # Delegate to the more general matrix comparison.
        return other.equiv(self)
    if isinstance(other, ConstWeighting):
        return np.array_equiv(self.array, other.const)
    return np.array_equal(self.array, other.array)
python
{ "resource": "" }
q35286
dca
train
def dca(x, f, g, niter, callback=None):
    r"""Subgradient DCA of Tao and An.

    Solves a problem of the form ::

        min_x f(x) - g(x),

    where ``f`` and ``g`` are proper, convex and lower semicontinuous
    functions, by alternating subgradient steps
    :math:`y_n \in \partial g(x_n)` and
    :math:`x_{n+1} \in \partial f^*(y_n)` (Theorem 3 of [TA1997]).
    Subgradients are obtained by evaluating the ``gradient`` of the
    respective functionals.

    Parameters
    ----------
    x : `LinearSpaceElement`
        Initial point, updated in-place.
    f : `Functional`
        Convex functional. Needs to implement ``f.convex_conj.gradient``.
    g : `Functional`
        Convex functional. Needs to implement ``g.gradient``.
    niter : int
        Number of iterations.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    References
    ----------
    [TA1997] Tao, P D, and An, L T H. *Convex analysis approach to d.c.
    programming: Theory, algorithms and applications*. Acta Mathematica
    Vietnamica, 22.1 (1997), pp 289--355.

    See also
    --------
    prox_dca : Solver with a proximal step for ``f`` and a subgradient
        step for ``g``.
    doubleprox_dc : Solver with proximal steps for all the nonsmooth
        convex functionals and a gradient step for a smooth functional.
    """
    if g.domain != f.domain:
        raise ValueError('`f.domain` and `g.domain` need to be equal, but '
                         '{} != {}'.format(f.domain, g.domain))

    conj = f.convex_conj
    for _ in range(niter):
        conj.gradient(g.gradient(x), out=x)
        if callback is not None:
            callback(x)
python
{ "resource": "" }
q35287
prox_dca
train
def prox_dca(x, f, g, niter, gamma, callback=None):
    r"""Proximal DCA of Sun, Sampaio and Candido.

    Solves a problem of the form ::

        min_x f(x) - g(x)

    where ``f`` and ``g`` are proper, convex and lower semicontinuous,
    using the iteration (Algorithm 2.3 of [SSC2003])

    .. math ::
        y_n \in \partial g(x_n), \qquad
        x_{n+1} = \mathrm{Prox}_{\gamma f}(x_n + \gamma y_n).

    In contrast to `dca`, proximal steps are taken with respect to the
    convex part ``f``; both algorithms use subgradients of ``g``.

    Parameters
    ----------
    x : `LinearSpaceElement`
        Initial point, updated in-place.
    f : `Functional`
        Convex functional. Needs to implement ``f.proximal``.
    g : `Functional`
        Convex functional. Needs to implement ``g.gradient``.
    niter : int
        Number of iterations.
    gamma : positive float
        Stepsize in the primal updates.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    References
    ----------
    [SSC2003] Sun, W, Sampaio R J B, and Candido M A B. *Proximal point
    algorithm for minimization of DC function*. Journal of Computational
    Mathematics, 21.4 (2003), pp 451--462.

    See also
    --------
    dca : Solver with subgradient steps for all the functionals.
    doubleprox_dc : Solver with proximal steps for all the nonsmooth
        convex functionals and a gradient step for a smooth functional.
    """
    if g.domain != f.domain:
        raise ValueError('`f.domain` and `g.domain` need to be equal, but '
                         '{} != {}'.format(f.domain, g.domain))

    for _ in range(niter):
        f.proximal(gamma)(x.lincomb(1, x, gamma, g.gradient(x)), out=x)
        if callback is not None:
            callback(x)
python
{ "resource": "" }
q35288
doubleprox_dc
train
def doubleprox_dc(x, y, f, phi, g, K, niter, gamma, mu, callback=None):
    r"""Double-proximal gradient d.c. algorithm of Banert and Bot.

    Solves problems of the form ::

        min_x f(x) + phi(x) - g(Kx).

    Parameters
    ----------
    x : `LinearSpaceElement`
        Initial primal guess, updated in-place.
    y : `LinearSpaceElement`
        Initial dual guess, updated in-place.
    f : `Functional`
        Convex functional. Needs to implement ``f.proximal``.
    phi : `Functional`
        Convex functional. Needs to implement ``phi.gradient``.
        Convergence can be guaranteed if the gradient is Lipschitz
        continuous.
    g : `Functional`
        Convex functional. Needs to implement ``g.convex_conj.proximal``.
    K : `Operator`
        Linear operator. Needs to implement ``K.adjoint``.
    niter : int
        Number of iterations.
    gamma : positive float
        Stepsize in the primal updates.
    mu : positive float
        Stepsize in the dual updates.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Notes
    -----
    The method was proposed in `[BB2016]
    <https://arxiv.org/abs/1610.06538>`_ for the d.c. problem

    .. math ::
        \min_x f(x) + \varphi(x) - g(Kx)

    together with its Toland dual

    .. math ::
        \min_y g^*(y) - (f + \varphi)^*(K^* y).

    The updates read

    .. math ::
        x_{n+1} &= \mathrm{Prox}_{\gamma f}
        (x_n + \gamma (K^* y_n - \nabla \varphi(x_n))), \\
        y_{n+1} &= \mathrm{Prox}_{\mu g^*} (y_n + \mu K x_{n+1}).

    Convergence is guaranteed for :math:`0 < \gamma < 2/L`, where
    :math:`L` is the Lipschitz constant of :math:`\nabla \varphi`.

    References
    ----------
    [BB2016] Banert, S, and Bot, R I. *A general double-proximal gradient
    algorithm for d.c. programming*. arXiv:1610.06538 [math.OC] (2016).

    See also
    --------
    dca : Solver with subgradient steps for all the functionals.
    prox_dca : Solver with a proximal step for ``f`` and a subgradient step
        for ``g``.
    """
    dom = f.domain
    ran = g.domain
    if phi.domain != dom:
        raise ValueError('`f.domain` and `phi.domain` need to be equal, but '
                         '{} != {}'.format(dom, phi.domain))
    if K.domain != dom:
        raise ValueError('`f.domain` and `K.domain` need to be equal, but '
                         '{} != {}'.format(dom, K.domain))
    if K.range != ran:
        raise ValueError('`g.domain` and `K.range` need to be equal, but '
                         '{} != {}'.format(ran, K.range))

    g_cc = g.convex_conj
    for _ in range(niter):
        # Forward-backward step in the primal variable.
        primal_arg = x.lincomb(1, x, gamma, K.adjoint(y) - phi.gradient(x))
        f.proximal(gamma)(primal_arg, out=x)
        # Proximal step on g* in the dual variable, using the new x.
        dual_arg = y.lincomb(1, y, mu, K(x))
        g_cc.proximal(mu)(dual_arg, out=y)
        if callback is not None:
            callback(x)
python
{ "resource": "" }
q35289
doubleprox_dc_simple
train
def doubleprox_dc_simple(x, y, f, phi, g, K, niter, gamma, mu):
    """Non-optimized version of ``doubleprox_dc``.

    Intended for debugging: it allocates freely on every step and performs
    no error checking on the spaces of the arguments.
    """
    for _ in range(niter):
        # Primal step: explicit gradient part, then prox of f into x.
        primal_arg = x + gamma * K.adjoint(y) - gamma * phi.gradient(x)
        f.proximal(gamma)(primal_arg, out=x)
        # Dual step: prox of the convex conjugate of g into y.
        g.convex_conj.proximal(mu)(y + mu * K(x), out=y)
python
{ "resource": "" }
q35290
matrix_representation
train
def matrix_representation(op):
    """Return a matrix representation of a linear operator.

    Parameters
    ----------
    op : `Operator`
        The linear operator of which one wants a matrix representation.
        If the domain or range is a `ProductSpace`, it must be a
        power-space.

    Returns
    -------
    matrix : `numpy.ndarray`
        The matrix representation of the operator. The shape will be
        ``op.domain.shape + op.range.shape`` and the dtype is the
        promoted (greatest) dtype of the domain and range.

    Examples
    --------
    Approximate a matrix on its own:

    >>> mat = np.array([[1, 2, 3],
    ...                 [4, 5, 6],
    ...                 [7, 8, 9]])
    >>> op = odl.MatrixOperator(mat)
    >>> matrix_representation(op)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])

    The function also handles `ProductSpace` and higher dimensional
    `TensorSpace` domains/ranges; the result is then a higher dimensional
    "matrix" (tensor) of shape ``op.range.shape + op.domain.shape``, which
    can be applied with e.g. `numpy.tensordot`.

    Notes
    -----
    The operator is applied to every unit vector of its domain in turn,
    and the outputs are stacked into the returned array.
    """
    def is_tensor_like(space):
        # Accept a plain tensor space or a power space of tensor spaces.
        return (isinstance(space, TensorSpace) or
                (isinstance(space, ProductSpace) and
                 space.is_power_space and
                 all(isinstance(part, TensorSpace) for part in space)))

    if not op.is_linear:
        raise ValueError('the operator is not linear')

    if not is_tensor_like(op.domain):
        raise TypeError('operator domain {!r} is neither `TensorSpace` '
                        'nor `ProductSpace` with only equal `TensorSpace` '
                        'components'.format(op.domain))

    if not is_tensor_like(op.range):
        raise TypeError('operator range {!r} is neither `TensorSpace` '
                        'nor `ProductSpace` with only equal `TensorSpace` '
                        'components'.format(op.range))

    # The result dtype must be able to hold values from both spaces.
    dtype = np.promote_types(op.domain.dtype, op.range.dtype)
    matrix = np.zeros(op.range.shape + op.domain.shape, dtype=dtype)

    out_elem = op.range.element()  # reused output buffer
    unit_vec = op.domain.zero()    # reused input buffer
    for index in nd_iterator(op.domain.shape):
        # Switch on one entry, apply the operator, store the "column",
        # then switch the entry off again for the next round.
        unit_vec[index] = 1.0
        op(unit_vec, out=out_elem)
        matrix[(Ellipsis,) + index] = out_elem.asarray()
        unit_vec[index] = 0.0

    return matrix
python
{ "resource": "" }
q35291
power_method_opnorm
train
def power_method_opnorm(op, xstart=None, maxiter=100, rtol=1e-05, atol=1e-08,
                        callback=None):
    r"""Estimate the operator norm with the power method.

    Parameters
    ----------
    op : `Operator`
        Operator whose norm is to be estimated. If its `Operator.range`
        range does not coincide with its `Operator.domain`, an
        `Operator.adjoint` must be defined (which implies that the
        operator must be linear).
    xstart : ``op.domain`` `element-like`, optional
        Starting point of the iteration. By default an `Operator.domain`
        element containing noise is used.
    maxiter : positive int, optional
        Number of iterations to perform. If the domain and range of ``op``
        do not match, it needs to be an even number. If ``None`` is given,
        iterate until convergence.
    rtol : float, optional
        Relative tolerance parameter (see Notes).
    atol : float, optional
        Absolute tolerance parameter (see Notes).
    callback : callable, optional
        Function called with the current iterate in each iteration.

    Returns
    -------
    est_opnorm : float
        The estimated operator norm of ``op``.

    Examples
    --------
    Verify that the identity operator has norm close to 1:

    >>> space = odl.uniform_discr(0, 1, 5)
    >>> id = odl.IdentityOperator(space)
    >>> estimation = power_method_opnorm(id)
    >>> round(estimation, ndigits=3)
    1.0

    Notes
    -----
    The operator norm :math:`||A||` is defined by as the smallest number
    such that

    .. math::
        ||A(x)|| \leq ||A|| ||x||

    for all :math:`x` in the domain of :math:`A`.

    The operator is evaluated until ``maxiter`` operator calls or until the
    relative error is small enough. The error measure is given by

        ``abs(a - b) <= (atol + rtol * abs(b))``,

    where ``a`` and ``b`` are consecutive iterates.
    """
    # ``maxiter=None`` means "iterate until convergence"; use the largest
    # representable int as a practically-infinite bound.
    if maxiter is None:
        maxiter = np.iinfo(int).max

    # Keep the user's original value for error messages.
    maxiter, maxiter_in = int(maxiter), maxiter
    if maxiter <= 0:
        raise ValueError('`maxiter` must be positive, got {}'
                         ''.format(maxiter_in))

    if op.domain == op.range:
        use_normal = False
        ncalls = maxiter
    else:
        # Do the power iteration for A*A; the norm of A*A(x_N) is then
        # an estimate of the square of the operator norm
        # We do only half the number of iterations compared to the usual
        # case to have the same number of operator evaluations.
        use_normal = True
        ncalls = maxiter // 2
        if ncalls * 2 != maxiter:
            raise ValueError('``maxiter`` must be an even number for '
                             'non-self-adjoint operator, got {}'
                             ''.format(maxiter_in))

    # Make sure starting point is ok or select initial guess
    if xstart is None:
        x = noise_element(op.domain)
    else:
        # copy to ensure xstart is not modified
        x = op.domain.element(xstart).copy()

    # Take first iteration step to normalize input
    x_norm = x.norm()
    if x_norm == 0:
        raise ValueError('``xstart`` must be nonzero')
    x /= x_norm

    # utility to calculate opnorm from xnorm
    # (in the normal-equation case the iteration estimates ||A||^2, hence
    # the square root)
    def calc_opnorm(x_norm):
        if use_normal:
            return np.sqrt(x_norm)
        else:
            return x_norm

    # initial guess of opnorm
    opnorm = calc_opnorm(x_norm)

    # temporary to improve performance
    tmp = op.range.element()

    # Use the power method to estimate opnorm
    for i in range(ncalls):
        if use_normal:
            # x <- A* A x (two calls, result written back into x)
            op(x, out=tmp)
            op.adjoint(tmp, out=x)
        else:
            # x <- A x; swap references so ``x`` always holds the newest
            # iterate and ``tmp`` the reusable buffer.
            op(x, out=tmp)
            x, tmp = tmp, x

        # Calculate x norm and verify it is valid
        x_norm = x.norm()
        if x_norm == 0:
            raise ValueError('reached ``x=0`` after {} iterations'.format(i))
        if not np.isfinite(x_norm):
            raise ValueError('reached nonfinite ``x={}`` after {} iterations'
                             ''.format(x, i))

        # Calculate opnorm, keeping the previous estimate for the
        # convergence check below.
        opnorm, opnorm_old = calc_opnorm(x_norm), opnorm

        # If the breaking condition holds, stop. Else rescale and go on.
        # NOTE: ``np.isclose(a, b, rtol, atol)`` takes rtol/atol
        # positionally in exactly this order.
        if np.isclose(opnorm, opnorm_old, rtol, atol):
            break
        else:
            x /= x_norm

        if callback is not None:
            callback(x)

    return opnorm
python
{ "resource": "" }
q35292
as_scipy_operator
train
def as_scipy_operator(op):
    """Wrap ``op`` as a ``scipy.sparse.linalg.LinearOperator``.

    The returned object can be fed directly to the scipy sparse linear
    solvers.

    Parameters
    ----------
    op : `Operator`
        A linear operator that should be wrapped.

    Returns
    -------
    ``scipy.sparse.linalg.LinearOperator`` : linear_op
        The wrapped operator. Its ``matvec`` calls ``op`` and its
        ``rmatvec`` calls ``op.adjoint``.

    Examples
    --------
    Wrap operator and solve simple problem (here toy problem ``Ix = b``)

    >>> op = odl.IdentityOperator(odl.rn(3))
    >>> scipy_op = as_scipy_operator(op)
    >>> import scipy.sparse.linalg as scipy_solvers
    >>> result, status = scipy_solvers.cg(scipy_op, [0, 1, 0])
    >>> result
    array([ 0.,  1.,  0.])

    Notes
    -----
    If the data representation of ``op``'s domain and range is of type
    `NumpyTensorSpace` this incurs no significant overhead. For nonlocal
    space types (e.g. ``CudaFn``), the per-call copies are significant.
    """
    # Lazy import to improve `import odl` time
    import scipy.sparse

    if not op.is_linear:
        raise ValueError('`op` needs to be linear')

    dtype = op.domain.dtype
    if op.range.dtype != dtype:
        raise ValueError('dtypes of ``op.domain`` and ``op.range`` needs to '
                         'match')

    def forward(vec):
        # Flat scipy vector -> domain shape -> apply -> flat vector.
        result = op(vec.reshape(op.domain.shape))
        return result.asarray().ravel()

    def backward(vec):
        # Same round-trip through the adjoint.
        result = op.adjoint(vec.reshape(op.range.shape))
        return result.asarray().ravel()

    return scipy.sparse.linalg.LinearOperator(
        shape=(native(op.range.size), native(op.domain.size)),
        matvec=forward,
        rmatvec=backward,
        dtype=dtype)
python
{ "resource": "" }
q35293
as_scipy_functional
train
def as_scipy_functional(func, return_gradient=False):
    """Wrap ``func`` as a function operating on flat arrays.

    This is intended to be used with the `scipy solvers
    <https://docs.scipy.org/doc/scipy/reference/optimize.html>`_.

    Parameters
    ----------
    func : `Functional`.
        A functional that should be wrapped.
    return_gradient : bool, optional
        ``True`` if the gradient of the functional should also be
        returned, ``False`` otherwise.

    Returns
    -------
    function : ``callable``
        The wrapped functional.
    gradient : ``callable``, optional
        The wrapped gradient. Only returned if ``return_gradient`` is true.

    Examples
    --------
    Wrap functional and solve simple problem
    (here toy problem ``min_x ||x||^2``):

    >>> func = odl.solvers.L2NormSquared(odl.rn(3))
    >>> scipy_func = odl.as_scipy_functional(func)
    >>> from scipy.optimize import minimize
    >>> result = minimize(scipy_func, x0=[0, 1, 0])
    >>> np.allclose(result.x, [0, 0, 0])
    True

    The gradient (jacobian) can also be provided:

    >>> func = odl.solvers.L2NormSquared(odl.rn(3))
    >>> scipy_func, scipy_grad = odl.as_scipy_functional(func, True)
    >>> from scipy.optimize import minimize
    >>> result = minimize(scipy_func, x0=[0, 1, 0], jac=scipy_grad)
    >>> np.allclose(result.x, [0, 0, 0])
    True

    Notes
    -----
    If the data representation of ``func``'s domain is of type
    `NumpyTensorSpace`, this incurs no significant overhead. For nonlocal
    space types (e.g. ``CudaFn``), the per-call copies are significant.
    """
    def wrapped_func(arr):
        # Reshape the flat scipy array to the functional's domain shape.
        point = np.asarray(arr).reshape(func.domain.shape)
        return func(point)

    if not return_gradient:
        return wrapped_func

    def wrapped_grad(arr):
        point = np.asarray(arr).reshape(func.domain.shape)
        return np.asarray(func.gradient(point))

    return wrapped_func, wrapped_grad
python
{ "resource": "" }
q35294
as_proximal_lang_operator
train
def as_proximal_lang_operator(op, norm_bound=None):
    """Wrap ``op`` as a ``proximal.BlackBox``.

    This is intended to be used with the `ProxImaL language solvers.
    <https://github.com/comp-imaging/proximal>`_

    For documentation on the proximal language (ProxImaL) see [Hei+2016].

    Parameters
    ----------
    op : `Operator`
        Linear operator to be wrapped. Its domain and range must implement
        ``shape``, and elements in these need to implement ``asarray``.
    norm_bound : float, optional
        An upper bound on the spectral norm of the operator. Note that this
        is the norm as defined by ProxImaL, and hence use the unweighted
        spaces.

    Returns
    -------
    ``proximal.BlackBox`` : proximal_lang_operator
        The wrapped operator.

    Notes
    -----
    If the data representation of ``op``'s domain and range is of type
    `NumpyTensorSpace` this incurs no significant overhead. If the data
    space is implemented with CUDA or some other non-local representation,
    the overhead is significant.

    References
    ----------
    [Hei+2016] Heide, F et al. *ProxImaL: Efficient Image Optimization
    using Proximal Algorithms*. ACM Transactions on Graphics (TOG), 2016.
    """
    # Import here so that `import odl` does not require ProxImaL.
    import proximal

    # TODO: use out parameter once "as editable array" is added
    def forward(inp, out):
        out[:] = op(inp).asarray()

    def adjoint(inp, out):
        out[:] = op.adjoint(inp).asarray()

    return proximal.LinOpFactory(input_shape=op.domain.shape,
                                 output_shape=op.range.shape,
                                 forward=forward,
                                 adjoint=adjoint,
                                 norm_bound=norm_bound)
python
{ "resource": "" }
q35295
skimage_sinogram_space
train
def skimage_sinogram_space(geometry, volume_space, sinogram_space):
    """Create a range adapted to the skimage radon geometry.

    The detector partition is enlarged by a factor ``sqrt(2)`` relative to
    the volume extent, presumably so that it covers the diagonal of the
    (square) volume for every projection angle.
    """
    sqrt2 = np.sqrt(2)
    padded_size = int(np.ceil(volume_space.shape[0] * sqrt2))
    det_width = volume_space.domain.extent[0] * sqrt2
    det_part = uniform_partition(-det_width / 2.0, det_width / 2.0,
                                 padded_size)

    # Combine the angle (motion) axis with the padded detector axis.
    range_part = geometry.motion_partition.insert(1, det_part)
    return uniform_discr_frompartition(range_part,
                                       interp=sinogram_space.interp,
                                       dtype=sinogram_space.dtype)
python
{ "resource": "" }
q35296
clamped_interpolation
train
def clamped_interpolation(skimage_range, sinogram):
    """Interpolate in a possibly smaller space.

    Detector coordinates of the evaluation points that fall outside the
    domain of ``skimage_range`` are moved to the nearest boundary value
    before interpolating in ``sinogram``.
    """
    low = skimage_range.domain.min()[1]
    high = skimage_range.domain.max()[1]

    def interpolation_wrapper(x):
        # Clamp only the detector coordinate (second axis).
        return sinogram.interpolation((x[0], np.clip(x[1], low, high)))

    return interpolation_wrapper
python
{ "resource": "" }
q35297
ScalingOperator._call
train
def _call(self, x, out=None):
    """Scale ``x`` and write to ``out`` if given."""
    if out is not None:
        # In-place variant: out <- scalar * x.
        out.lincomb(self.scalar, x)
        return out
    return self.scalar * x
python
{ "resource": "" }
q35298
ScalingOperator.inverse
train
def inverse(self):
    """Return the inverse operator, scaling by ``1 / scalar``.

    Raises
    ------
    ZeroDivisionError
        If ``scalar`` is zero, since scaling by zero is not invertible.

    Examples
    --------
    >>> r3 = odl.rn(3)
    >>> vec = r3.element([1, 2, 3])
    >>> op = ScalingOperator(r3, 2.0)
    >>> inv = op.inverse
    >>> inv(op(vec)) == vec
    True
    >>> op(inv(vec)) == vec
    True
    """
    scalar = self.scalar
    if scalar == 0.0:
        raise ZeroDivisionError('scaling operator not invertible for '
                                'scalar==0')
    return ScalingOperator(self.domain, 1.0 / scalar)
python
{ "resource": "" }
q35299
ScalingOperator.adjoint
train
def adjoint(self):
    """Adjoint, given as scaling with the conjugate of the scalar.

    Returns
    -------
    adjoint : `ScalingOperator`
        ``self`` if `scalar` is real, else a scaling by the conjugated
        scalar.

    Examples
    --------
    In the real case, the adjoint is the same as the operator:

    >>> r3 = odl.rn(3)
    >>> x = r3.element([1, 2, 3])
    >>> op = ScalingOperator(r3, 2)
    >>> op(x)
    rn(3).element([ 2.,  4.,  6.])
    >>> op.adjoint(x)  # The same
    rn(3).element([ 2.,  4.,  6.])

    In the complex case, the scalar is conjugated:

    >>> c3 = odl.cn(3)
    >>> x_complex = c3.element([1, 1j, 1-1j])
    >>> op = ScalingOperator(c3, 1+1j)
    >>> expected_op = ScalingOperator(c3, 1-1j)
    >>> op.adjoint(x_complex)
    cn(3).element([ 1.-1.j,  1.+1.j,  0.-2.j])
    >>> expected_op(x_complex)  # The same
    cn(3).element([ 1.-1.j,  1.+1.j,  0.-2.j])
    """
    scalar = self.scalar
    if complex(scalar).imag == 0.0:
        # Real scaling is self-adjoint.
        return self
    return ScalingOperator(self.domain, scalar.conjugate())
python
{ "resource": "" }