diff --git a/examples/tomo/ray_trafo_parallel_2d.py b/examples/tomo/ray_trafo_parallel_2d.py
index 0ba70384559..625c77b16d0 100644
--- a/examples/tomo/ray_trafo_parallel_2d.py
+++ b/examples/tomo/ray_trafo_parallel_2d.py
@@ -28,7 +28,7 @@
 # projection data (or any element in the projection space).
 backproj = ray_trafo.adjoint(proj_data)
 
-# Shows a slice of the phantom, projections, and reconstruction
+# Show a slice of the phantom, projections, and reconstruction
 phantom.show(title='Phantom')
 proj_data.show(title='Projection data (sinogram)')
 backproj.show(title='Back-projected data', force_show=True)
diff --git a/odl/discr/discr_mappings.py b/odl/discr/discr_mappings.py
index db5ed94fa6c..e6da641992d 100644
--- a/odl/discr/discr_mappings.py
+++ b/odl/discr/discr_mappings.py
@@ -79,10 +79,11 @@ def __init__(self, map_type, fspace, partition, tspace, linear=False):
                              'of the function set {}'
                              ''.format(partition, fspace.domain, fspace))
 
-        if tspace.shape != partition.shape:
-            raise ValueError('`tspace.shape` not equal to `partition.shape`: '
-                             '{} != {}'
-                             ''.format(tspace.shape, partition.shape))
+        if tspace.shape != fspace.shape_out + partition.shape:
+            raise ValueError(
+                '`tspace.shape` not equal to '
+                '`fspace.shape_out + partition.shape`: {} != {}'
+                ''.format(tspace.shape, fspace.shape_out + partition.shape))
 
         domain = fspace if map_type == 'sampling' else tspace
         range = tspace if map_type == 'sampling' else fspace
@@ -323,7 +324,7 @@ def __init__(self, fspace, partition, tspace, variant='left'):
         data type in 2d:
 
         >>> rect = odl.IntervalProd([0, 0], [1, 1])
-        >>> fspace = odl.FunctionSpace(rect, out_dtype='U1')
+        >>> fspace = odl.FunctionSpace(rect, dtype_out='U1')
 
         Partitioning the domain uniformly with no nodes on the boundary
         (will shift the grid points):
diff --git a/odl/discr/discr_ops.py b/odl/discr/discr_ops.py
index b1715a4af1f..0b1f24c60e8 100644
--- a/odl/discr/discr_ops.py
+++ b/odl/discr/discr_ops.py
@@ -524,7 +524,7 @@ def _resize_discr(discr, newshp, offset, discr_kwargs):
             new_maxpt.append(grid_max[axis] + (num_r + 0.5) * cell_size[axis])
 
     fspace = FunctionSpace(IntervalProd(new_minpt, new_maxpt),
-                           out_dtype=dtype)
+                           dtype_out=dtype)
     tspace = tensor_space(newshp, dtype=dtype, impl=impl, exponent=exponent,
                           weighting=weighting)
 
diff --git a/odl/discr/discretization.py b/odl/discr/discretization.py
index 0f12dfa1046..fdf7f586665 100644
--- a/odl/discr/discretization.py
+++ b/odl/discr/discretization.py
@@ -375,6 +375,13 @@ def __eq__(self, other):
     def __getitem__(self, indices):
        """Return ``self[indices]``.
 
+        .. note::
+            This is a basic implementation that does little more than
+            propagate the indexing to the `tensor` attribute. In
+            particular, the result will **not** be wrapped as a
+            `DiscretizedSpaceElement` again. Subclasses should take
+            care of that by overriding ``__getitem__``.
+
         Parameters
         ----------
         indices : int or `slice`
@@ -417,9 +424,9 @@ def sampling(self, ufunc, **kwargs):
         Parameters
         ----------
         ufunc : ``self.space.fspace`` element
-            The continuous function that should be samplingicted.
+            The continuous function that should be sampled.
         kwargs :
-            Additional arugments for the sampling operator implementation
+            Additional arguments for the sampling operator implementation.
 
         Examples
         --------
@@ -445,7 +452,7 @@ def interpolation(self):
         Returns
         -------
         interpolation_op : `FunctionSpaceMapping`
-            Operatior representing a continuous interpolation of this
+            Operator representing a continuous interpolation of this
             element.
 
         Examples
diff --git a/odl/discr/grid.py b/odl/discr/grid.py
index c729c242340..56d2c2d6b85 100644
--- a/odl/discr/grid.py
+++ b/odl/discr/grid.py
@@ -18,7 +18,7 @@
 from odl.set import Set, IntervalProd
 from odl.util import (
     normalized_index_expression, normalized_scalar_param_list, safe_int_conv,
-    array_str, signature_string, indent, npy_printoptions)
+    array_str, signature_string, indent, npy_printoptions, array_hash)
 
 __all__ = ('RectGrid', 'uniform_grid', 'uniform_grid_fromintv')
 
@@ -521,8 +521,9 @@ def __eq__(self, other):
     def __hash__(self):
         """Return ``hash(self)``."""
         # TODO: update with #841
-        coord_vec_str = tuple(cv.tobytes() for cv in self.coord_vectors)
-        return hash((type(self), coord_vec_str))
+        return hash(
+            (type(self), tuple(array_hash(cv) for cv in self.coord_vectors))
+        )
 
     def approx_contains(self, other, atol):
         """Test if ``other`` belongs to this grid up to a tolerance.
diff --git a/odl/discr/lp_discr.py b/odl/discr/lp_discr.py
index b7a1ed40a81..351b811a98b 100644
--- a/odl/discr/lp_discr.py
+++ b/odl/discr/lp_discr.py
@@ -9,7 +9,7 @@
 """Lebesgue L^p type discretizations of function spaces."""
 
 from __future__ import print_function, division, absolute_import
-from numbers import Integral
+from builtins import int
 import numpy as np
 
 from odl.discr.discretization import (
@@ -17,22 +17,25 @@
 from odl.discr.discr_mappings import (
     PointCollocation, NearestInterpolation, LinearInterpolation,
     PerAxisInterpolation)
+from odl.discr.grid import uniform_grid
 from odl.discr.partition import (
-    RectPartition, uniform_partition_fromintv, uniform_partition)
+    RectPartition, uniform_partition_fromintv, uniform_partition_fromgrid,
+    uniform_partition)
 from odl.set import RealNumbers, ComplexNumbers, IntervalProd
 from odl.space import FunctionSpace, ProductSpace
 from odl.space.entry_points import tensor_space_impl
-from odl.space.weighting import ConstWeighting
+from odl.space.weighting import ConstWeighting, PerAxisWeighting
 from odl.util import (
     apply_on_boundary, is_real_dtype, is_complex_floating_dtype, is_string,
-    is_floating_dtype, is_numeric_dtype,
+    is_floating_dtype, is_numeric_dtype, is_int,
     dtype_str, array_str, signature_string, indent, npy_printoptions,
-    normalized_scalar_param_list, safe_int_conv, normalized_nodes_on_bdry)
+    normalized_scalar_param_list, safe_int_conv, normalized_nodes_on_bdry,
+    normalized_index_expression, simulate_slicing, normalized_axis_indices)
 
 __all__ = ('DiscreteLp', 'DiscreteLpElement',
            'uniform_discr_frompartition', 'uniform_discr_fromspace',
            'uniform_discr_fromintv', 'uniform_discr',
-           'uniform_discr_fromdiscr', 'discr_sequence_space')
+           'uniform_discr_fromdiscr')
 
 _SUPPORTED_INTERP = ('nearest', 'linear')
 
@@ -85,10 +88,10 @@ def __init__(self, fspace, partition, tspace, interp='nearest', **kwargs):
         if not fspace.domain.contains_set(partition.set):
             raise ValueError('`partition` {} is not a subset of the function '
                              'domain {}'.format(partition, fspace.domain))
 
-        if fspace.scalar_out_dtype != tspace.dtype:
-            raise ValueError('`fspace.scalar_out_dtype` does not match '
+        if fspace.scalar_dtype_out != tspace.dtype:
+            raise ValueError('`fspace.scalar_dtype_out` does not match '
                              '`tspace.dtype`: {} != {}'
-                             ''.format(fspace.scalar_out_dtype, tspace.dtype))
+                             ''.format(fspace.scalar_dtype_out, tspace.dtype))
 
         self.__partition = partition
 
@@ -103,8 +106,8 @@ def __init__(self, fspace, partition, tspace, interp='nearest', **kwargs):
         else:
             # Got sequence of strings
             if len(interp) != partition.ndim:
-                raise ValueError('expected {} (ndim) entries in `interp`, '
-                                 'got {}'.format(partition.ndim, len(interp)))
+                raise ValueError('expected {} entries in `interp`, got {}'
+                                 ''.format(partition.ndim, len(interp)))
 
             self.__interp_byaxis = tuple(str(s).lower() for s in interp)
             if any(s not in _SUPPORTED_INTERP for s in self.interp_byaxis):
@@ -126,12 +129,19 @@
         # Set axis labels
         axis_labels = kwargs.pop('axis_labels', None)
         if axis_labels is None:
-            if self.ndim <= 3:
-                self.__axis_labels = ('$x$', '$y$', '$z$')[:self.ndim]
+            axis_labels_out = tuple('$i_{}$'.format(ax)
+                                    for ax in range(self.ndim_out))
+            if self.ndim_in <= 3:
+                axis_labels_in = ('$x$', '$y$', '$z$')[:self.ndim_in]
             else:
-                self.__axis_labels = tuple('$x_{}$'.format(axis)
-                                           for axis in range(self.ndim))
+                axis_labels_in = tuple('$x_{}$'.format(ax)
+                                       for ax in range(self.ndim_in))
+            self.__axis_labels = axis_labels_out + axis_labels_in
         else:
+            if len(axis_labels) != self.ndim:
+                raise ValueError(
+                    'length of `axis_labels` must equal the number of axes, '
+                    'but {} != {}'.format(len(axis_labels), self.ndim))
             self.__axis_labels = tuple(str(label) for label in axis_labels)
 
         if kwargs:
@@ -141,7 +151,7 @@
     @property
     def interp(self):
         """Interpolation type of this discretization."""
-        if self.ndim == 0:
+        if self.ndim_in == 0:
             return 'nearest'
         elif all(interp == self.interp_byaxis[0]
                  for interp in self.interp_byaxis):
@@ -196,18 +206,92 @@
     @property
     def grid(self):
 
     @property
     def shape(self):
-        """Shape of the underlying partition."""
+        """Shape of the underlying tensor space.
+
+        For spaces of discretized vector- or tensor-valued functions,
+        this includes the output components as ::
+
+            shape = fspace.shape_out + partition.shape
+
+        Examples
+        --------
+        >>> space = odl.uniform_discr(0, 1, 4, dtype=(float, (2, 3)))
+        >>> space.shape
+        (2, 3, 4)
+        """
+        return self.tspace.shape
+
+    @property
+    def shape_in(self):
+        """Shape of the "input" part, i.e. of the `partition`.
+
+        For spaces of discretized scalar-valued functions, this is the
+        same as `shape`. For vector- or tensor-valued functions, this
+        is the shape of *one component*.
+
+        Examples
+        --------
+        >>> space = odl.uniform_discr(0, 1, 4, dtype=(float, (2, 3)))
+        >>> space.shape_in
+        (4,)
+        """
         return self.partition.shape
 
+    @property
+    def ndim_in(self):
+        """Number of "input" dimensions, equals ``len(self.shape_in)``."""
+        return len(self.shape_in)
+
+    @property
+    def shape_out(self):
+        """Shape of the "output" part, i.e. of the `fspace`.
+
+        For spaces of discretized vector- or tensor-valued functions,
+        this is the shape of a function value; for scalar-valued functions,
+        it is an empty tuple.
+
+        Examples
+        --------
+        >>> space = odl.uniform_discr(0, 1, 4, dtype=(float, (2, 3)))
+        >>> space.shape_out
+        (2, 3)
+        """
+        return self.fspace.shape_out
+
+    @property
+    def ndim_out(self):
+        """Number of "output" dimensions, equals ``len(self.shape_out)``."""
+        return len(self.shape_out)
+
     @property
     def ndim(self):
-        """Number of dimensions (= number of axes)."""
-        return self.partition.ndim
+        """Total number of dimensions, including input and output."""
+        return self.ndim_in + self.ndim_out
+
+    @property
+    def dtype(self):
+        """Data type of this space.
+
+        For spaces of discretized vector- or tensor-valued functions,
+        the data type has a shape. For the base data type without shape,
+        use `scalar_dtype`.
+        """
+        # Construct explicitly since `fspace` may have dtype `None`
+        return np.dtype((self.tspace.dtype, self.fspace.shape_out))
+
+    @property
+    def scalar_dtype(self):
+        """Scalar data type of this space.
+
+        For spaces of discretized vector- or tensor-valued functions,
+        this is the variant of `dtype` without a shape.
+        """
+        return self.tspace.dtype
 
     @property
     def size(self):
-        """Total number of underlying partition cells."""
-        return self.partition.size
+        """Total number of entries in the underlying tensor space."""
+        return self.tspace.size
 
     @property
     def cell_sides(self):
@@ -257,10 +341,10 @@ def tangent_bundle(self):
         This space can be identified with the power space ``X^d`` as used
         in this implementation.
         """
-        if self.ndim == 0:
+        if self.ndim_in == 0:
             return ProductSpace(field=self.field)
         else:
-            return ProductSpace(self, self.ndim)
+            return ProductSpace(self, self.ndim_in)
 
     @property
     def is_uniformly_weighted(self):
@@ -375,19 +459,37 @@ def element(self, inp=None, order=None, **kwargs):
             self, self.tspace.element(inp, order=order))
 
     def _astype(self, dtype):
-        """Internal helper for ``astype``."""
+        """Internal helper for ``astype``.
+
+        Subclasses with different constructor signature should override this
+        method.
+        """
+        dtype = np.dtype(dtype)
         fspace = self.fspace.astype(dtype)
-        tspace = self.tspace.astype(dtype)
+
+        # If `dtype` has a shape, we remove the old output axes and insert
+        # the new ones
+        if dtype.shape == ():
+            tspace = self.tspace.astype(dtype)
+            axis_labels_new = self.axis_labels
+        else:
+            tspace = self.tspace.byaxis[self.ndim_out:].newaxis(0, dtype.shape)
+            tspace = tspace.astype(dtype.base)
+            axis_labels_extra = tuple('$i_{}$'.format(ax)
+                                      for ax in range(len(dtype.shape)))
+            axis_labels_new = (axis_labels_extra +
+                               self.axis_labels[self.ndim_out:])
+
         return type(self)(
             fspace, self.partition, tspace, interp=self.interp,
-            axis_labels=self.axis_labels)
+            axis_labels=axis_labels_new)
 
     # Overrides for space functions depending on partition
     #
-    # The inherited methods by default use a weighting by a constant
+    # The inherited methods by default use weighting by a constant
     # (the grid cell size). In dimensions where the partitioned set contains
     # only a fraction of the outermost cells (e.g. if the outermost grid
-    # points lie at the boundary), the corresponding contribuitons to
+    # points lie at the boundary), the corresponding contributions to
     # discretized integrals need to be scaled by that fraction.
     def _inner(self, x, y):
         """Return ``self.inner(x, y)``."""
@@ -424,8 +526,280 @@ def _dist(self, x, y):
         else:
             return super(DiscreteLp, self)._dist(x, y)
 
-    # TODO: add byaxis_out when discretized tensor-valued functions are
-    # available
+    def __getitem__(self, indices):
+        """Return ``self[indices]``.
+
+        For most cases, indexing is implemented such that for an element
+        ``x in space``, it holds ::
+
+            x[indices] in space[indices]
+
+        Space indexing does not work with index arrays or "fancy indexing".
+
+        Notes
+        -----
+        New axes can be created, however not within the axes that
+        correspond to the function domain, only as new output axes.
+        In other words, a scalar function can be turned into a
+        ``(1, ..., 1)`` tensor-valued function this way by adding new axes
+        of size 1, or a vector-valued function into a tensor-valued one
+        with sizes 1 in the new axes. This is useful for broadcasting,
+        see Examples.
+
+        Examples
+        --------
+        A single integer slices along the first axis (index does not
+        matter as long as it lies within the bounds):
+
+        >>> space = odl.uniform_discr([0, 0, 0, 0], [1, 2, 3, 4],
+        ...                           shape=(2, 4, 6, 8))
+        >>> space[0]
+        uniform_discr([ 0.,  0.,  0.], [ 2.,  3.,  4.], (4, 6, 8))
+        >>> space[1]
+        uniform_discr([ 0.,  0.,  0.], [ 2.,  3.,  4.], (4, 6, 8))
+
+        Multiple indices slice into corresponding axes from the left:
+
+        >>> space[0, 0]
+        uniform_discr([ 0.,  0.], [ 3.,  4.], (6, 8))
+        >>> space[0, 1, -1]
+        uniform_discr(0.0, 4.0, 8)
+
+        Ellipsis (``...``) and ``slice(None)`` (``:``) can be used to keep
+        one or several axes intact:
+
+        >>> space[0, :, 1, :]
+        uniform_discr([ 0.,  0.], [ 2.,  4.], (4, 8))
+        >>> space[..., 0, 0]
+        uniform_discr([ 0.,  0.], [ 1.,  2.], (2, 4))
+
+        With slices, parts of axes can be selected:
+
+        >>> space[0, :3, 1:4, :]
+        uniform_discr([ 0. ,  0.5,  0. ], [ 1.5,  2. ,  4. ], (3, 3, 8))
+
+        Indexing is also supported for vector- or tensor-valued function
+        spaces. In that case, leftmost indices are associated with the
+        output dimensions, rightmost indices with the input (domain)
+        dimensions:
+
+        >>> space = odl.uniform_discr(0, 1, 5, dtype=(float, (2, 3)))
+        >>> space.shape
+        (2, 3, 5)
+        >>> space[0]  # (2, 3)-valued -> (3,)-valued (first row)
+        uniform_discr(0.0, 1.0, 5, dtype=('float64', (3,)))
+        >>> space[:, 0]  # (2, 3)-valued -> (2,)-valued (first column)
+        uniform_discr(0.0, 1.0, 5, dtype=('float64', (2,)))
+        >>> space[..., :2]  # last axis = domain
+        uniform_discr(0.0, 0.4, 2, dtype=('float64', (2, 3)))
+        """
+        # Unwrap tensor for passing on to underlying indexing methods
+        if isinstance(indices, self.element_type):
+            indices = indices.tensor
+
+        if getattr(indices, 'dtype', object) == bool:
+            # Raising here as signal for `DiscreteLpElement.__getitem__`
+            # This is an optimization that avoids counting the number of
+            # `True` entries here *and* in the actual element indexing
+            # operation. We do it lazily instead.
+            raise TypeError('cannot index space with a boolean element or '
+                            'array')
+
+        # The overall logic of the slicing is as follows:
+        # - indices are normalized to be a tuple of length `ndim`
+        # - the `tspace` is indexed with the whole index tuple
+        # - the `partition` is indexed with the "in" part of the tuple only,
+        #   i.e., the last part excluding `n_out = len(shape_out)` axes
+        # - the `shape_out` for the function space is determined using the
+        #   first `n_out` index tuple entries
+        # - the new function space is constructed from `new_partition.set`,
+        #   `fspace.scalar_dtype_out` and the new `shape_out`
+
+        # Normalize the index expression based on the full shape, and
+        # split into "in" and "out" parts
+        indices = normalized_index_expression(indices, self.shape)
+
+        # New axes can be added to `indices_out`, so we can only use
+        # `indices_in` as fixed in length
+        indices_out = indices[:len(indices) - self.ndim_in]
+        indices_in = indices[len(indices) - self.ndim_in:]
+        # Now `indices_in` shouldn't contain any `None` since we don't
+        # allow new axes there; we cannot use `indices.contains()` for
+        # that check since it uses the `==` operator which will fail if
+        # one of the indices is a Numpy array
+        if any(idx is None for idx in indices_in):
+            raise ValueError('new axes can be created only with respect to '
+                             'the "output" dimensions, see the documentation')
+
+        # Index tensor space
+        res_tspace = self.tspace[indices]
+
+        # Index partition, manually removing collapsed axes
+        res_part = self.partition[indices_in]
+        _, collapsed_axes_in, _, _ = simulate_slicing(
+            self.shape_in, indices_in)
+        remaining_axes_in = [i for i in range(self.ndim_in)
+                             if i not in collapsed_axes_in]
+        res_part = res_part.byaxis[remaining_axes_in]
+
+        # Determine new fspace
+        sliced_shape_out, collapsed_axes_out, new_axes, _ = simulate_slicing(
+            self.shape_out, indices_out)
+        res_fspace = FunctionSpace(
+            res_part.set,
+            dtype_out=(self.fspace.scalar_dtype_out, sliced_shape_out))
+
+        remaining_axes_out = [i for i in range(self.ndim_out)
+                              if i not in collapsed_axes_out]
+        remaining_axes = (remaining_axes_out +
+                          [self.ndim_out + i for i in remaining_axes_in])
+        # Further attributes for the new space
+        res_interp = [self.interp_byaxis[i] for i in range(self.ndim_in)
+                      if i in remaining_axes_in]
+        res_labels = [self.axis_labels[i] for i in range(self.ndim)
+                      if i in remaining_axes]
+        for i, ax in enumerate(new_axes):
+            res_labels.insert(ax, '$new_{}$'.format(i))
+
+        # Create new space
+        return DiscreteLp(res_fspace, res_part, res_tspace, interp=res_interp,
+                          axis_labels=res_labels)
+
+    @property
+    def byaxis(self):
+        """Object to index along (input and output) axes.
+
+        .. note::
+            - Output dimensions come first (function components), then
+              input dimensions (function domain).
+            - If the index expression does not contain an index from the
+              "in" or "out" parts, the result will have no axes in that part.
+              For output dimensions, this means that the resulting space will
+              be scalar-valued, for input dimensions, the domain will be
+              empty, which is usually not desired.
+            - If a list of integers is given, the respective axes are
+              "stacked" as desired. However, it is not possible to mix
+              output with input axes.
+
+        Examples
+        --------
+        Indexing with integers or slices:
+
+        >>> space = odl.uniform_discr([0, 0, 0], [1, 2, 3], (5, 10, 15),
+        ...                           dtype=(float, (2, 3)))
+        >>> space.byaxis[0]
+        uniform_discr([], [], (), dtype=('float64', (2,)))
+        >>> space.byaxis[[0, 2, 3]]
+        uniform_discr([ 0.,  0.], [ 1.,  2.], (5, 10), dtype=('float64', (2,)))
+        >>> space.byaxis[1:3]
+        uniform_discr(0.0, 1.0, 5, dtype=('float64', (3,)))
+        >>> space.byaxis[2]
+        uniform_discr(0.0, 1.0, 5)
+
+        Lists can be used to stack spaces arbitrarily (as long as "in" and
+        "out" axes are not mixed):
+
+        >>> space.byaxis[[1, 2, 2]]
+        uniform_discr([ 0.,  0.], [ 1.,  1.], (5, 5), dtype=('float64', (3,)))
+        """
+        space = self
+
+        class DiscreteLpByaxis(object):
+
+            """Helper class for indexing by axes."""
+
+            def __getitem__(self, indices):
+                """Return ``self[indices]``.
+
+                Parameters
+                ----------
+                indices : index expression
+                    Object used to index the space.
+
+                Returns
+                -------
+                space : `DiscreteLp`
+                    The resulting space after indexing along axes, with
+                    roughly the same properties except possibly weighting.
+                """
+                from odl.space.fspace import _out_in_indices_overlap
+
+                indices, indices_in = (
+                    normalized_axis_indices(indices, space.ndim), indices)
+                if _out_in_indices_overlap(indices, space.ndim_out):
+                    raise ValueError(
+                        '`indices` {} has overlapping output and input '
+                        'axes'.format(indices_in))
+
+                idcs_in = [i - space.ndim_out for i in indices
+                           if i >= space.ndim_out]
+
+                fspace = space.fspace.byaxis[indices]
+                part = space.partition.byaxis[idcs_in]
+                tspace = space.tspace.byaxis[indices]
+
+                interp = tuple(space.interp_byaxis[int(i)]
+                               for i in idcs_in)
+                labels = tuple(space.axis_labels[int(i)]
+                               for i in indices)
+
+                return DiscreteLp(fspace, part, tspace, interp,
+                                  axis_labels=labels)
+
+            def __repr__(self):
+                """Return ``repr(self)``."""
+                return repr(space) + '.byaxis'
+
+        return DiscreteLpByaxis()
+
+    @property
+    def byaxis_out(self):
+        """Object to index along output (tensor component) dimensions.
+
+        Examples
+        --------
+        Indexing with integers or slices:
+
+        >>> space = odl.uniform_discr(0, 1, 5, dtype=(float, (2, 3, 4)))
+        >>> space.byaxis_out[0]
+        uniform_discr(0.0, 1.0, 5, dtype=('float64', (2,)))
+        >>> space.byaxis_out[1:]
+        uniform_discr(0.0, 1.0, 5, dtype=('float64', (3, 4)))
+
+        Lists can be used to stack spaces arbitrarily:
+
+        >>> space.byaxis_out[[2, 1, 2]]
+        uniform_discr(0.0, 1.0, 5, dtype=('float64', (4, 3, 4)))
+        """
+        space = self
+
+        class DiscreteLpByaxisOut(object):
+
+            """Helper class for indexing by output axes."""
+
+            def __getitem__(self, indices):
+                """Return ``self[indices]``.
+
+                Parameters
+                ----------
+                indices : index expression
+                    Object used to index the output axes.
+
+                Returns
+                -------
+                space : `DiscreteLp`
+                    The resulting space with indexed output components and
+                    otherwise same properties (except possibly weighting).
+                """
+                idcs_out = normalized_axis_indices(indices, space.ndim_out)
+                idcs_in = tuple(range(space.ndim_out, space.ndim))
+                return space.byaxis[idcs_out + idcs_in]
+
+            def __repr__(self):
+                """Return ``repr(self)``."""
+                return repr(space) + '.byaxis_out'
+
+        return DiscreteLpByaxisOut()
 
     @property
     def byaxis_in(self):
@@ -468,41 +842,10 @@ def __getitem__(self, indices):
                 The resulting space with indexed domain and otherwise
                 same properties (except possibly weighting).
""" - fspace = space.fspace.byaxis_in[indices] - part = space.partition.byaxis[indices] - - if isinstance(space.weighting, ConstWeighting): - # Need to manually construct `tspace` since it doesn't - # know where its weighting factor comes from - try: - iter(indices) - except TypeError: - newshape = space.shape[indices] - else: - newshape = tuple(space.shape[int(i)] for i in indices) - - weighting = part.cell_volume - tspace = type(space.tspace)( - newshape, space.dtype, - exponent=space.exponent, weighting=weighting) - else: - # Other weighting schemes are handled correctly by - # the tensor space - tspace = space.tspace.byaxis[indices] - - try: - iter(indices) - except TypeError: - interp = space.interp_byaxis[indices] - labels = space.axis_labels[indices] - else: - interp = tuple(space.interp_byaxis[int(i)] - for i in indices) - labels = tuple(space.axis_labels[int(i)] - for i in indices) - - return DiscreteLp(fspace, part, tspace, interp, - axis_labels=labels) + indices = normalized_axis_indices(indices, space.ndim_in) + idcs_out = list(range(space.ndim_out)) + idcs_in = [i + space.ndim_out for i in indices] + return space.byaxis[idcs_out + idcs_in] def __repr__(self): """Return ``repr(self)``.""" @@ -514,12 +857,12 @@ def __repr__(self): """Return ``repr(self)``.""" # Clunky check if the factory repr can be used if (uniform_partition_fromintv( - self.fspace.domain, self.shape, + self.fspace.domain, self.shape_in, nodes_on_bdry=False) == self.partition): use_uniform = True nodes_on_bdry = False elif (uniform_partition_fromintv( - self.fspace.domain, self.shape, + self.fspace.domain, self.shape_in, nodes_on_bdry=True) == self.partition): use_uniform = True nodes_on_bdry = True @@ -528,44 +871,71 @@ def __repr__(self): if use_uniform: ctor = 'uniform_discr' - if self.ndim == 1: - posargs = [self.min_pt[0], self.max_pt[0], self.shape[0]] + if self.ndim_in == 1: + posargs = [self.min_pt[0], self.max_pt[0], self.shape_in[0]] posmod = [':.4', ':.4', ''] else: - posargs = [self.min_pt, self.max_pt, self.shape] + posargs = [self.min_pt, self.max_pt, self.shape_in] posmod = [array_str, array_str, ''] default_dtype_s = dtype_str( self.tspace.default_dtype(RealNumbers())) - dtype_s = dtype_str(self.dtype) - optargs = [('interp', self.interp, 'nearest'), + optargs = [('dtype', dtype_s, default_dtype_s), + ('interp', self.interp, 'nearest'), ('impl', self.impl, 'numpy'), - ('nodes_on_bdry', nodes_on_bdry, False), - ('dtype', dtype_s, default_dtype_s)] + ('nodes_on_bdry', nodes_on_bdry, False)] # Add weighting stuff if not equal to default - if (self.exponent == float('inf') or - self.ndim == 0 or - not is_floating_dtype(self.dtype)): - # In these cases, weighting constant 1 is the default - if (not isinstance(self.weighting, ConstWeighting) or - not np.isclose(self.weighting.const, 1.0)): - optargs.append(('weighting', self.weighting.const, None)) + default_factors_out = [1.0] * self.ndim_out + if self.exponent != 2.0: + weighting_part = self.weighting.repr_part + + elif (self.exponent == float('inf') or + self.ndim_in == 0 or + not is_floating_dtype(self.dtype)): + # In these cases, weighting factors 1 is the default + default_factors_in = [1.0] * self.ndim_in + default_factors = default_factors_out + default_factors_in + if ( + not isinstance(self.weighting, PerAxisWeighting) or + all(fac.ndim == 0 and np.isclose(fac, default_factors[i]) + for i, fac in enumerate(self.weighting.factors)) + ): + weighting_part = self.weighting.repr_part + else: - if (not isinstance(self.weighting, ConstWeighting) 
-                        not np.isclose(self.weighting.const,
-                                       self.cell_volume)):
-                    optargs.append(('weighting', self.weighting.const, None))
+                # Here, per-axis weighting with the cell sides as factors is
+                # the default
+                default_factors_in = list(self.cell_sides)
+                default_factors = default_factors_out + default_factors_in
+
+                if isinstance(self.weighting, ConstWeighting):
+                    weighting_part = ('weighting={:.4}'
+                                      ''.format(self.weighting.const))
+                elif (isinstance(self.weighting, PerAxisWeighting) and
+                      all(fac.ndim == 0 and np.isclose(fac, default_factors[i])
+                          for i, fac in enumerate(self.weighting.factors))):
+                    weighting_part = ''
+                else:
+                    weighting_part = self.weighting.repr_part
 
             optmod = [''] * len(optargs)
-            if self.dtype in (float, complex, int, bool):
-                optmod[3] = '!s'
+            if self.dtype.base in (float, complex, int, bool):
+                optmod[0] = '!s'
+
+            sep = ', ' if len(weighting_part) <= 30 else [', ', ',\n', ',\n']
 
             with npy_printoptions(precision=4):
-                inner_str = signature_string(posargs, optargs,
+                inner_str = signature_string(posargs, optargs, sep=sep,
                                              mod=[posmod, optmod])
 
-            return '{}({})'.format(ctor, inner_str)
+            if len(weighting_part) <= 30:
+                if weighting_part:
+                    inner_str += ', ' + weighting_part
+                return '{}({})'.format(ctor, inner_str)
+            else:
+                inner_str += ',\n' + weighting_part
+                return '{}(\n{}\n)'.format(ctor, indent(inner_str))
 
         else:
             ctor = self.__class__.__name__
@@ -577,7 +947,7 @@ def __repr__(self):
                 sep=[',\n', ', ', ',\n'],
                 mod=['!r', '!s'])
 
-            return '{}({})'.format(ctor, indent(inner_str))
+            return '{}(\n{}\n)'.format(ctor, indent(inner_str))
 
     def __str__(self):
         """Return ``str(self)``."""
@@ -593,6 +963,35 @@ class DiscreteLpElement(DiscretizedSpaceElement):
 
     """Representation of a `DiscreteLp` element."""
 
+    @property
+    def shape_in(self):
+        """Shape of the "input" part, i.e. of the function domain.
+
+        For scalar-valued functions, this is the same as `shape`. For
+        vector- or tensor-valued functions, this is the shape of
+        *one component*.
+        """
+        return self.space.shape_in
+
+    @property
+    def ndim_in(self):
+        """Number of "input" dimensions, equals ``len(self.shape_in)``."""
+        return self.space.ndim_in
+
+    @property
+    def shape_out(self):
+        """Shape of the "output" part, i.e. of the function range.
+
+        For vector- or tensor-valued functions, this is the shape of a
+        function value; for scalar-valued functions, it is an empty tuple.
+        """
+        return self.space.shape_out
+
+    @property
+    def ndim_out(self):
+        """Number of "output" dimensions, equals ``len(self.shape_out)``."""
+        return self.space.ndim_out
+
     @property
     def cell_sides(self):
         """Side lengths of a cell in an underlying *uniform* partition."""
@@ -759,28 +1158,141 @@ def conj(self, out=None):
             self.tensor.conj(out=out.tensor)
         return out
 
-    def __setitem__(self, indices, values):
-        """Set values of this element.
+    def __getitem__(self, indices):
+        """Return ``self[indices]``.
 
         Parameters
         ----------
-        indices : int or `slice`
-            The position(s) that should be set
-        values : scalar or `array-like`
-            Value(s) to be assigned.
-            If ``indices`` is an integer, ``values`` must be a scalar
-            value.
-            If ``indices`` is a slice, ``values`` must be
-            broadcastable to the size of the slice (same size,
-            shape ``(1,)`` or scalar).
-            For ``indices == slice(None)``, i.e. in the call
-            ``vec[:] = values``, a multi-dimensional array of correct
-            shape is allowed as ``values``.
+        indices : index expression
+            The position(s) that should be accessed.
+
+        Returns
+        -------
+        values : `DiscreteLpElement`
+            The value(s) at the index (indices).
+
+        Notes
+        -----
+        New axes can be created, however not within the axes that
+        correspond to the function domain, only as new output axes.
+        In other words, a scalar function can be turned into a
+        ``(1, ..., 1)`` tensor-valued function this way by adding new axes
+        of size 1, or a vector-valued function into a tensor-valued one
+        with sizes 1 in the new axes. This is useful for broadcasting,
+        see Examples.
+
+        Examples
+        --------
+        Indexing of scalar-valued functions works just as with
+        arrays:
+
+        >>> space = odl.uniform_discr([0, 0, 0], [1, 2, 3], (2, 3, 4))
+        >>> x = space.element(
+        ...     [[[1, 2, 3, 4],
+        ...       [2, 3, 4, 5],
+        ...       [3, 4, 5, 6]],
+        ...      [[4, 5, 6, 7],
+        ...       [5, 6, 7, 8],
+        ...       [6, 7, 8, 9]]]
+        ... )
+        >>> x[0]
+        uniform_discr([ 0.,  0.], [ 2.,  3.], (3, 4)).element(
+            [[ 1.,  2.,  3.,  4.],
+             [ 2.,  3.,  4.,  5.],
+             [ 3.,  4.,  5.,  6.]]
+        )
+        >>> x[:, 1, 1:-1]
+        uniform_discr([ 0.  ,  0.75], [ 1.  ,  2.25], (2, 2)).element(
+            [[ 3.,  4.],
+             [ 6.,  7.]]
+        )
+
+        We can make a "fake" vector-valued function by adding a new axis
+        to the left (new axes are not allowed in the "domain" part):
+
+        >>> x[None, :, 0, 0]
+        uniform_discr(0.0, 1.0, 2, dtype=('float64', (1,))).element(
+            [[ 1.,  4.]]
+        )
+
+        Vector- and tensor-valued functions work similarly -- the only
+        notable difference here is that the first two axes are part of the
+        function output, i.e., we have a ``(2, 3)``-tensor-valued function:
+
+        >>> space = odl.uniform_discr(0, 1, 4, dtype=(float, (2, 3)))
+        >>> x = space.element(
+        ...     [[[1, 2, 3, 4],
+        ...       [2, 3, 4, 5],
+        ...       [3, 4, 5, 6]],
+        ...      [[4, 5, 6, 7],
+        ...       [5, 6, 7, 8],
+        ...       [6, 7, 8, 9]]]
+        ... )
+        >>> x[0]
+        uniform_discr(0.0, 1.0, 4, dtype=('float64', (3,))).element(
+            [[ 1.,  2.,  3.,  4.],
+             [ 2.,  3.,  4.,  5.],
+             [ 3.,  4.,  5.,  6.]]
+        )
+        >>> x[:, 1, 1:-1]
+        uniform_discr(0.25, 0.75, 2, dtype=('float64', (2,))).element(
+            [[ 3.,  4.],
+             [ 6.,  7.]]
+        )
+
+        Now we have more options to add new axes:
+
+        >>> x[None, :, 0, 0]  # collapses domain completely
+        uniform_discr([], [], (), dtype=('float64', (1, 2))).element(
+            [[ 1.,  4.]]
+        )
+        >>> x[None, 0, None, :, :]
+        uniform_discr(0.0, 1.0, 4, dtype=('float64', (1, 1, 3))).element(
+            [[[[ 1.,  2.,  3.,  4.],
+               [ 2.,  3.,  4.,  5.],
+               [ 3.,  4.,  5.,  6.]]]]
+        )
         """
-        if values in self.space:
-            self.tensor[indices] = values.tensor
+        if isinstance(indices, type(self)):
+            indices = indices.tensor
+        if isinstance(indices, type(self.tensor)):
+            indices = indices.data
+        if isinstance(indices, type(self.tensor.data)) and not is_int(indices):
+            return self.tensor[indices]
+
+        # Normalize the index expression based on the full shape, and
+        # split into "in" and "out" parts
+        # TODO(kohr-h): This step butchers "list of index lists" type
+        # of indices that we should make work. Think of a way around this
+        # issue. The `simulate_slicing` function likely has some of the
+        # required functionality.
+        indices = normalized_index_expression(indices, self.shape)
+
+        # New axes can be added to the output part, so we can only use
+        # `indices_in` as fixed in length
+        indices_in = indices[len(indices) - self.space.ndim_in:]
+        # Now `indices_in` shouldn't contain any `None` since we don't
+        # allow new axes there; we cannot use `indices.contains()` for
+        # that check since it uses the `==` operator which will fail if
+        # one of the indices is a Numpy array
+        if any(idx is None for idx in indices_in):
+            raise ValueError('new axes can be created only with respect to '
+                             'the "output" dimensions, see the documentation')
+
+        res_tens = self.tensor[indices]
+        if np.isscalar(res_tens):
+            return res_tens
+
+        try:
+            res_space = self.space[indices]
+        except (ValueError, TypeError):
+            # Cannot index the space like this, e.g., with an index array or
+            # a boolean array; just return the tensor
+            # TODO(kohr-h): Relying on ValueError and TypeError is a bit
+            # dangerous, there must be a better way
+            return res_tens
         else:
-            super(DiscreteLpElement, self).__setitem__(indices, values)
+            return res_space.element(res_tens)
 
     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
         """Interface to Numpy's ufunc machinery.
@@ -1023,8 +1535,6 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
             reduced_axes = [i for i in range(self.ndim)
                             if i not in axis]
 
-        weighting = self.space.weighting
-
         # --- Evaluate ufunc --- #
 
         if method == '__call__':
@@ -1038,9 +1548,9 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                     # Make new function space based on result dtype,
                     # keep everything else, and get `tspace` from the result
                     # tensor.
-                    out_dtype = (res_tens.dtype, self.space.fspace.out_shape)
+                    dtype_out = (res_tens.dtype, self.space.fspace.shape_out)
                     fspace = FunctionSpace(self.space.fspace.domain,
-                                           out_dtype)
+                                           dtype_out)
                     res_space = DiscreteLp(
                         fspace, self.space.partition, res_tens.space,
                         self.space.interp_byaxis,
@@ -1058,9 +1568,9 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
 
                 if out1 is None:
                     # Wrap as for nout = 1
-                    out_dtype = (res1_tens.dtype, self.space.fspace.out_shape)
+                    dtype_out = (res1_tens.dtype, self.space.fspace.shape_out)
                     fspace = FunctionSpace(self.space.fspace.domain,
-                                           out_dtype)
+                                           dtype_out)
                     res_space = DiscreteLp(
                         fspace, self.space.partition, res1_tens.space,
                         self.space.interp_byaxis,
@@ -1071,9 +1581,9 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
 
                 if out2 is None:
                     # Wrap as for nout = 1
-                    out_dtype = (res2_tens.dtype, self.space.fspace.out_shape)
+                    dtype_out = (res2_tens.dtype, self.space.fspace.shape_out)
                     fspace = FunctionSpace(self.space.fspace.domain,
-                                           out_dtype)
+                                           dtype_out)
                     res_space = DiscreteLp(
                         fspace, self.space.partition, res2_tens.space,
                         self.space.interp_byaxis,
@@ -1128,7 +1638,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                     # Make `fspace` with appropriate dtype, get `tspace`
                     # from the result tensor and keep the rest
                     fspace = FunctionSpace(self.space.domain,
-                                           out_dtype=res_tens.dtype)
+                                           dtype_out=res_tens.dtype)
                     res_space = DiscreteLp(
                         fspace, self.space.partition,
                         res_tens.space,
@@ -1141,7 +1651,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                 # and determine `tspace` from the result tensor
                 inp1, inp2 = inputs
                 domain = inp1.space.domain.append(inp2.space.domain)
-                fspace = FunctionSpace(domain, out_dtype=res_tens.dtype)
+                fspace = FunctionSpace(domain, dtype_out=res_tens.dtype)
                 part = inp1.space.partition.append(inp2.space.partition)
                 interp = (inp1.space.interp_byaxis +
                           inp2.space.interp_byaxis)
@@ -1149,24 +1659,9 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                 labels1 = [lbl + ' (1)' for lbl in inp1.space.axis_labels]
                 labels2 = [lbl + ' (2)' for lbl in inp2.space.axis_labels]
                 labels = labels1 + labels2
 
-                if all(isinstance(inp.space.weighting, ConstWeighting)
-                       for inp in inputs):
-                    # For constant weighting, use the product of the
-                    # two weighting constants. The result tensor space
-                    # cannot know about the "correct" way to combine the
-                    # two constants, so we need to do it manually here.
-                    weighting = (inp1.space.weighting.const *
-                                 inp2.space.weighting.const)
-                    tspace = type(res_tens.space)(
-                        res_tens.shape, res_tens.dtype,
-                        exponent=res_tens.space.exponent,
-                        weighting=weighting)
-                else:
-                    # Otherwise `TensorSpace` knows how to handle this
-                    tspace = res_tens.space
-
                 res_space = DiscreteLp(
-                    fspace, part, tspace, interp, axis_labels=labels)
+                    fspace, part, res_tens.space, interp,
+                    axis_labels=labels)
                 result = res_space.element(res_tens)
 
             elif method == 'reduce':
@@ -1326,7 +1821,7 @@ def show(self, title=None, method='', coords=None, indices=None,
                            tuple(n // 2 for n in self.space.shape[2:]))
 
         # Normalize indices
-        if isinstance(indices, (Integral, slice)):
+        if is_int(indices) or isinstance(indices, slice):
            indices = (indices,)
        elif indices is None or indices == Ellipsis:
            indices = (slice(None),) * self.ndim
@@ -1358,11 +1853,18 @@ def show(self, title=None, method='', coords=None, indices=None,
                        for idx in indices)
 
         squeezed_axes = [axis for axis in range(self.ndim)
-                         if not isinstance(indices[axis], Integral)]
-        axis_labels = [self.space.axis_labels[axis] for axis in squeezed_axes]
-
-        # Squeeze grid and values according to the index expression
-        part = self.space.partition[indices].squeeze()
+                         if is_int(indices[axis])]
+        axis_labels = [self.space.axis_labels[ax]
+                       for ax in range(self.ndim) if ax not in squeezed_axes]
+
+        # Extend partition (trivially) to output axes
+        part_out = uniform_partition_fromgrid(
+            uniform_grid([0] * self.space.ndim_out, self.space.shape_out,
+                         self.space.shape_out))
+        part = part_out.append(self.space.partition)
+
+        # Squeeze partition and values according to the index expression
+        part = part[indices].squeeze()
         values = self.asarray()[indices].squeeze()
 
         return show_discrete_data(values, part, title=title, method=method,
@@ -1380,9 +1882,12 @@ def uniform_discr_frompartition(partition, dtype=None, impl='numpy', **kwargs):
         It defines the domain and the functions and the grid for
         discretization.
     dtype : optional
-        Data type for the discretized space, must be understood by the
-        `numpy.dtype` constructor. The default for ``None`` depends on the
-        ``impl`` backend, usually it is ``'float64'`` or ``'float32'``.
+        Data type of the created space. Can be provided in any way the
+        `numpy.dtype` constructor understands, e.g. as built-in type or
+        as a string.
+
+        To create a space of vector- or tensor-valued functions,
+        use a dtype with a shape, e.g., ``np.dtype((float, (2, 3)))``.
     impl : string, optional
         Implementation of the data storage arrays
     kwargs :
@@ -1416,22 +1921,24 @@ def uniform_discr_frompartition(partition, dtype=None, impl='numpy', **kwargs):
     if dtype is not None:
         dtype = np.dtype(dtype)
 
-    fspace = FunctionSpace(partition.set, out_dtype=dtype)
-    ds_type = tspace_type(fspace, impl, dtype)
+    fspace = FunctionSpace(partition.set, dtype_out=dtype)
+    ds_type = tspace_type(fspace, impl, fspace.scalar_dtype_out)
 
     if dtype is None:
         dtype = ds_type.default_dtype()
 
     weighting = kwargs.pop('weighting', None)
     exponent = kwargs.pop('exponent', 2.0)
-    if weighting is None and is_numeric_dtype(dtype):
-        if exponent == float('inf') or partition.ndim == 0:
-            weighting = 1.0
-        else:
-            weighting = partition.cell_volume
-
-    tspace = ds_type(partition.shape, dtype, exponent=exponent,
-                     weighting=weighting)
+    if (
+        weighting is None and
        is_numeric_dtype(dtype) and
        exponent not in (float('inf'), -float('inf'))
    ):
+        weighting = np.concatenate([np.ones(len(fspace.shape_out)),
+                                    partition.cell_sides])
+
+    tspace = ds_type(fspace.shape_out + partition.shape, dtype.base,
+                     exponent=exponent, weighting=weighting)
 
     return DiscreteLp(fspace, partition, tspace, **kwargs)
 
@@ -1461,8 +1968,8 @@ def uniform_discr_fromspace(fspace, shape, dtype=None, impl='numpy', **kwargs):
     Examples
     --------
     >>> intv = odl.IntervalProd(0, 1)
-    >>> space = odl.FunctionSpace(intv)
-    >>> uniform_discr_fromspace(space, 10)
+    >>> fspace = odl.FunctionSpace(intv)
+    >>> odl.uniform_discr_fromspace(fspace, 10)
     uniform_discr(0.0, 1.0, 10)
 
     See Also
@@ -1483,15 +1990,19 @@ def uniform_discr_fromspace(fspace, shape, dtype=None, impl='numpy', **kwargs):
                         '`IntervalProd` instance'.format(fspace.domain))
 
     # Set data type. If given, check consistency with fspace's field and
-    # out_dtype. If not given, take the latter.
+    # dtype_out. If not given, take the latter.
     if dtype is None:
-        dtype = fspace.out_dtype
+        dtype = fspace.dtype_out
     else:
         dtype, dtype_in = np.dtype(dtype), dtype
-        if not np.can_cast(fspace.scalar_out_dtype, dtype, casting='safe'):
+        if not np.can_cast(fspace.scalar_dtype_out, dtype, casting='safe'):
            raise ValueError('cannot safely cast from output data {} type of '
                             'the function space to given data type {}'
                             ''.format(fspace.out, dtype_in))
+        if dtype.shape != fspace.shape_out:
+            raise ValueError('`dtype.shape` {} not equal to '
+                             '`fspace.shape_out` {}'
+                             ''.format(dtype.shape, fspace.shape_out))
 
     if fspace.field == RealNumbers() and not is_real_dtype(dtype):
         raise ValueError('cannot discretize real space {} with '
@@ -1504,9 +2015,7 @@ def uniform_discr_fromspace(fspace, shape, dtype=None, impl='numpy', **kwargs):
                          ''.format(fspace, dtype))
 
     nodes_on_bdry = kwargs.pop('nodes_on_bdry', False)
-    partition = uniform_partition_fromintv(fspace.domain, shape,
-                                           nodes_on_bdry)
-
+    partition = uniform_partition_fromintv(fspace.domain, shape, nodes_on_bdry)
     return uniform_discr_frompartition(partition, dtype, impl, **kwargs)
 
 
@@ -1551,7 +2060,7 @@ def uniform_discr_fromintv(intv_prod, shape, dtype=None, impl='numpy',
     if dtype is None:
         dtype = tensor_space_impl(str(impl).lower()).default_dtype()
 
-    fspace = FunctionSpace(intv_prod, out_dtype=dtype)
+    fspace = FunctionSpace(intv_prod, dtype_out=dtype)
     return uniform_discr_fromspace(fspace, shape, dtype, impl, **kwargs)
 
 
@@ -1599,11 +2108,14 @@ def uniform_discr(min_pt, max_pt, shape, dtype=None, impl='numpy', **kwargs):
         Use weighted inner product, norm, and dist.
 
        The following types are supported as ``weighting``:
 
-        - ``None``: Use the cell volume as weighting constant (default).
+        - ``None``: Use the cell sides as weighting constants (default).
         - ``float``: Weighting by a constant.
-        - array-like: Point-wise weighting by an array.
-        - `Weighting`: Use weighting class as-is. Compatibility
-          with this space's elements is not checked during init.
+        - sequence of length ``ndim``: Separate weighting per axis.
+          Entries can be constants or 1D arrays.
+        - array-like: Pointwise weighting by an array of the same
+          ``shape`` as this space.
+        - `Weighting`: Use this weighting as-is. Compatibility
+          with this space is not checked during init.
 
     Returns
    -------
@@ -1647,45 +2159,6 @@ def uniform_discr(min_pt, max_pt, shape, dtype=None, impl='numpy', **kwargs):
     return uniform_discr_fromintv(intv_prod, shape, dtype, impl, **kwargs)
 
 
-def discr_sequence_space(shape, dtype=None, impl='numpy', **kwargs):
-    """Return an object mimicing the sequence space ``l^p(R^d)``.
-
-    The returned object is a `DiscreteLp` on the domain ``[0, shape - 1]``,
-    using a uniform grid with stride 1.
-
-    Parameters
-    ----------
-    shape : int or sequence of ints
-        Number of element entries per axis.
-    dtype : optional
-        Data type for the discretized space, must be understood by the
-        `numpy.dtype` constructor. The default for ``None`` depends on the
-        ``impl`` backend, usually it is ``'float64'`` or ``'float32'``.
-    impl : string, optional
-        Implementation of the data storage arrays.
-    kwargs :
-        Additional keyword parameters, see `uniform_discr` for details.
-        Note that ``nodes_on_bdry`` cannot be given.
-
-    Returns
-    -------
-    seqspc : `DiscreteLp`
-        Sequence-space-like discrete Lp.
-
-    Examples
-    --------
-    >>> seq_spc = discr_sequence_space((3, 3))
-    >>> seq_spc.one().norm() == 3.0
-    True
-
-    >>> seq_spc = discr_sequence_space((3, 3), exponent=1)
-    >>> seq_spc.one().norm() == 9.0
-    True
-    """
-    shape = np.atleast_1d(shape)
-    return uniform_discr([0] * len(shape), shape - 1, shape, dtype, impl,
-                         nodes_on_bdry=True, **kwargs)
-
-
 def uniform_discr_fromdiscr(discr, min_pt=None, max_pt=None,
                             shape=None, cell_sides=None, **kwargs):
     """Return a discretization based on an existing one.
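[Reviewer note] To summarize the new tensor-valued API added to `DiscreteLp` above, here is a short, self-contained usage sketch in doctest style. It is only meaningful on this branch, since it relies on the `dtype=(float, (2, 3))` constructor argument and the `shape_in`/`shape_out`/`byaxis_out` attributes introduced in this patch; the printed values follow the docstring examples above.

>>> import odl
>>> space = odl.uniform_discr(0, 1, 4, dtype=(float, (2, 3)))
>>> space.shape            # shape_out + shape_in
(2, 3, 4)
>>> space.shape_out        # shape of one function value
(2, 3)
>>> space.shape_in         # shape of the underlying partition
(4,)
>>> space[0]               # leftmost indices slice the output axes
uniform_discr(0.0, 1.0, 4, dtype=('float64', (3,)))
>>> space.byaxis_out[0]    # keep only the first output axis
uniform_discr(0.0, 1.0, 4, dtype=('float64', (2,)))
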
diff --git a/odl/operator/operator.py b/odl/operator/operator.py
index 33d9b2acb52..42addf08ce0 100644
--- a/odl/operator/operator.py
+++ b/odl/operator/operator.py
@@ -11,12 +11,12 @@
 from __future__ import print_function, division, absolute_import
 from builtins import object
 import inspect
-from numbers import Number, Integral
+from numbers import Number
 import sys
 
 from odl.set import LinearSpace, Set, Field
 from odl.set.space import LinearSpaceElement
-from odl.util import cache_arguments
+from odl.util import cache_arguments, is_int
 
 __all__ = ('Operator',
            'OperatorComp', 'OperatorSum', 'OperatorVectorSum',
@@ -986,7 +986,7 @@ def __pow__(self, n):
         >>> squared(x)
         rn(3).element([ 27.,  54.,  81.])
         """
-        if isinstance(n, Integral) and n > 0:
+        if is_int(n) and n > 0:
             op = self
             while n > 1:
                 op = OperatorComp(self, op)
diff --git a/odl/operator/pspace_ops.py b/odl/operator/pspace_ops.py
index 149b260c556..199853e6e4a 100644
--- a/odl/operator/pspace_ops.py
+++ b/odl/operator/pspace_ops.py
@@ -9,12 +9,12 @@
 """Default operators defined on any `ProductSpace`."""
 
 from __future__ import print_function, division, absolute_import
-from numbers import Integral
 import numpy as np
 
 from odl.operator.operator import Operator
 from odl.operator.default_ops import ZeroOperator
 from odl.space import ProductSpace
+from odl.util import is_int
 
 __all__ = ('ProductSpaceOperator',
@@ -770,8 +770,8 @@ def __init__(self, *operators):
         """
         if (len(operators) == 2 and
                 isinstance(operators[0], Operator) and
-                isinstance(operators[1], Integral)):
-            operators = (operators[0],) * operators[1]
+                is_int(operators[1])):
+            operators = (operators[0],) * int(operators[1])
 
         self.__operators = operators
         self.__prod_op = ProductSpaceOperator([[op] for op in operators])
@@ -945,8 +945,8 @@ def __init__(self, *operators):
         """
         if (len(operators) == 2 and
                 isinstance(operators[0], Operator) and
-                isinstance(operators[1], Integral)):
-            operators = (operators[0],) * operators[1]
+                is_int(operators[1])):
+            operators = (operators[0],) * int(operators[1])
 
         self.__operators = operators
         self.__prod_op = ProductSpaceOperator([operators])
@@ -1136,8 +1136,8 @@ def __init__(self, *operators, **kwargs):
 
         if (len(operators) == 2 and
                 isinstance(operators[0], Operator) and
-                isinstance(operators[1], Integral)):
-            operators = (operators[0],) * operators[1]
+                is_int(operators[1])):
+            operators = (operators[0],) * int(operators[1])
 
         n_ops = len(operators)
         irow = icol = np.arange(n_ops)
diff --git a/odl/operator/tensor_ops.py b/odl/operator/tensor_ops.py
index 6a6c55d6a9c..0132cab4923 100644
--- a/odl/operator/tensor_ops.py
+++ b/odl/operator/tensor_ops.py
@@ -9,7 +9,6 @@
 """Operators defined for tensor fields."""
 
 from __future__ import print_function, division, absolute_import
-from numbers import Integral
 import numpy as np
 from packaging.version import parse as parse_version
 
@@ -19,7 +18,8 @@
 from odl.space.base_tensors import TensorSpace
 from odl.space.weighting import ArrayWeighting
 from odl.util import (
-    signature_string, indent, dtype_repr, moveaxis, writable_array)
+    signature_string, indent, dtype_repr, writable_array, is_int)
+from odl.util.npy_compat import moveaxis
 
 __all__ = ('PointwiseNorm', 'PointwiseInner', 'PointwiseSum',
            'MatrixOperator',
@@ -996,7 +996,7 @@ def _normalize_sampling_points(sampling_points, ndim):
             raise ValueError('`sampling_points` must be empty for '
                              '0-dim. `domain`')
     elif ndim == 1:
-        if isinstance(sampling_points, Integral):
+        if is_int(sampling_points):
             sampling_points = (sampling_points,)
         sampling_points = np.array(sampling_points, dtype=int,
                                    copy=False, ndmin=1)
diff --git a/odl/set/sets.py b/odl/set/sets.py
index 16de1b8fa9e..6730dda77ad 100644
--- a/odl/set/sets.py
+++ b/odl/set/sets.py
@@ -10,11 +10,12 @@
 
 from __future__ import print_function, division, absolute_import
 from builtins import int, object
-from numbers import Integral, Real, Complex
+from numbers import Real, Complex
 from past.types.basestring import basestring
 import numpy as np
 
-from odl.util import is_int_dtype, is_real_dtype, is_numeric_dtype, unique
+from odl.util import (
+    is_int_dtype, is_real_dtype, is_numeric_dtype, unique, is_int)
 
 __all__ = ('Set', 'EmptySet', 'UniversalSet', 'Field', 'Integers',
@@ -446,7 +447,7 @@ class Integers(Set):
 
     def __contains__(self, other):
         """Return ``other in self``."""
-        return isinstance(other, Integral)
+        return is_int(other)
 
     def contains_set(self, other):
         """Test if ``other`` is a subset of the integers.
diff --git a/odl/solvers/functional/default_functionals.py b/odl/solvers/functional/default_functionals.py
index 96818dd38c0..360562f0100 100644
--- a/odl/solvers/functional/default_functionals.py
+++ b/odl/solvers/functional/default_functionals.py
@@ -9,7 +9,6 @@
 """Default functionals defined on any space similar to R^n or L^2."""
 
 from __future__ import print_function, division, absolute_import
-from numbers import Integral
 import numpy as np
 
 from odl.solvers.functional.functional import (Functional,
@@ -27,7 +26,8 @@
     proximal_const_func, proximal_box_constraint,
     proximal_convex_conj_kl, proximal_convex_conj_kl_cross_entropy,
     combine_proximals, proximal_convex_conj)
-from odl.util import conj_exponent, moveaxis
+from odl.util import conj_exponent, is_int
+from odl.util.npy_compat import moveaxis
 
 __all__ = ('ZeroFunctional', 'ConstantFunctional', 'ScalingFunctional',
@@ -1658,8 +1658,8 @@ def __init__(self, *functionals):
         """
         # Make a power space if the second argument is an integer
         if (len(functionals) == 2 and
-                isinstance(functionals[1], Integral)):
-            functionals = [functionals[0]] * functionals[1]
+                is_int(functionals[1])):
+            functionals = [functionals[0]] * int(functionals[1])
 
         domains = [func.domain for func in functionals]
         domain = ProductSpace(*domains)
diff --git a/odl/solvers/nonsmooth/proximal_operators.py b/odl/solvers/nonsmooth/proximal_operators.py
index 112c2a74135..7499cc2229c 100644
--- a/odl/solvers/nonsmooth/proximal_operators.py
+++ b/odl/solvers/nonsmooth/proximal_operators.py
@@ -29,6 +29,7 @@
     PointwiseNorm, MultiplyOperator)
 from odl.space import ProductSpace
 from odl.set.space import LinearSpaceElement
+from odl.util import npy_erroroptions
 
 __all__ = ('combine_proximals', 'proximal_convex_conj', 'proximal_translation',
@@ -1847,20 +1848,18 @@ def _call(self, x, out):
             # Lazy import to improve `import odl` time
             import scipy.special
 
-            if g is None:
-                # If g is None, it is taken as the one element
-                # Different branches of lambertw is not an issue, see Notes
-                lambw = scipy.special.lambertw(
-                    (self.sigma / lam) * np.exp(x / lam))
-            else:
-                # Different branches of lambertw is not an issue, see Notes
-                lambw = scipy.special.lambertw(
-                    (self.sigma / lam) * g * np.exp(x / lam))
-
-            if not np.issubsctype(self.domain.dtype, np.complexfloating):
-                lambw = lambw.real
-
-            lambw = x.space.element(lambw)
+            with npy_erroroptions(complex='ignore'):
+                if g is None:
+                    # If g is None, it is taken as the one element
+                    # Different branches of lambertw are not an issue, see Notes
+                    lambw = scipy.special.lambertw(
+                        (self.sigma / lam) * np.exp(x / lam))
+                else:
+                    # Different branches of lambertw are not an issue, see Notes
+                    lambw = scipy.special.lambertw(
+                        (self.sigma / lam) * g * np.exp(x / lam))
+
+                lambw = x.space.element(lambw)
 
             out.lincomb(1, x, -lam, lambw)
diff --git a/odl/space/base_tensors.py b/odl/space/base_tensors.py
index cc94efd347e..9f17aa293f6 100644
--- a/odl/space/base_tensors.py
+++ b/odl/space/base_tensors.py
@@ -91,18 +91,18 @@ def __init__(self, shape, dtype):
         self.__shape = dtype.shape + shape
         self.__dtype = dtype.base
 
-        if is_real_dtype(self.dtype):
+        if is_real_dtype(self.__dtype):
             # real includes non-floating-point like integers
             field = RealNumbers()
-            self.__real_dtype = self.dtype
+            self.__real_dtype = self.__dtype
             self.__real_space = self
-            self.__complex_dtype = TYPE_MAP_R2C.get(self.dtype, None)
+            self.__complex_dtype = TYPE_MAP_R2C.get(self.__dtype, None)
             self.__complex_space = None  # Set in first call of astype
-        elif is_complex_floating_dtype(self.dtype):
+        elif is_complex_floating_dtype(self.__dtype):
             field = ComplexNumbers()
-            self.__real_dtype = TYPE_MAP_C2R[self.dtype]
+            self.__real_dtype = TYPE_MAP_C2R[self.__dtype]
             self.__real_space = None  # Set in first call of astype
-            self.__complex_dtype = self.dtype
+            self.__complex_dtype = self.__dtype
             self.__complex_space = self
         else:
             field = None
@@ -206,13 +206,13 @@ def complex_space(self):
     def _astype(self, dtype):
         """Internal helper for `astype`.
 
-        Subclasses with differing init parameters should overload this
+        Subclasses with different constructor signature should override this
         method.
         """
         kwargs = {}
+        # Use weighting only for floating-point types, otherwise, e.g.,
+        # `space.astype(bool)` would fail
         if is_floating_dtype(dtype):
-            # Use weighting only for floating-point types, otherwise, e.g.,
-            # `space.astype(bool)` would fail
             weighting = getattr(self, 'weighting', None)
             if weighting is not None:
                 kwargs['weighting'] = weighting
@@ -227,13 +227,24 @@ def astype(self, dtype):
         dtype :
             Scalar data type of the returned space. Can be provided
             in any way the `numpy.dtype` constructor understands, e.g.
-            as built-in type or as a string. Data types with non-trivial
-            shapes are not allowed.
+            as built-in type or as a string.
+            For data types with non-trivial shape, that shape is added to
+            the left of the existing axes.
 
         Returns
         -------
         newspace : `TensorSpace`
             Version of this space with given data type.
+
+        Examples
+        --------
+        >>> space = odl.rn(3)
+        >>> space.astype('float32')
+        rn(3, dtype='float32')
+        >>> space.astype(int)
+        tensor_space(3, dtype=int)
+        >>> space.astype((float, (2,)))
+        rn((2, 3))
         """
         if dtype is None:
             # Need to filter this out since Numpy interprets it as 'float'
@@ -243,8 +254,9 @@ def astype(self, dtype):
         if dtype == self.dtype:
             return self
 
-        if is_numeric_dtype(self.dtype):
-            # Caching for real and complex versions (exact dtype mappings)
+        # Invoke caching for real and complex versions (exact dtype mappings)
+        # if the provided dtype has no shape
+        if is_numeric_dtype(self.dtype) and dtype.shape == ():
             if dtype == self.__real_dtype:
                 if self.__real_space is None:
                     self.__real_space = self._astype(dtype)
@@ -258,11 +270,28 @@ def astype(self, dtype):
         else:
             return self._astype(dtype)
 
-    @property
-    def default_order(self):
-        """Default storage order for new elements in this space.
+    def newaxis(self, index, shape, weighting=None):
+        """Return a copy of this space with extra axes inserted.
 
-        This property should be overridden by subclasses.
+        .. note::
+            This function is most useful in the default case of per-axis
+            weighting factors. For weighting with a single constant
+            or with an array of the same shape as this space, "inserting"
+            new factors does not really make sense, and the ``weighting``
+            argument will not be used. In these cases, it is better to
+            construct a new space altogether.
+
+        Parameters
+        ----------
+        index : int
+            Position at which to insert new axes. Negative indices are
+            counted backwards from the end.
+        shape : nonnegative int or sequence of nonnegative ints
+            Sizes of the axes that are to be inserted.
+        weighting : float, `array-like` or `Weighting`, optional
+            Weighting factors that should be used in the new axes.
+            The number of factors must correspond to the number of entries
+            in ``shape`` (1 if ``shape`` is an int).
         """
         raise NotImplementedError('abstract method')
 
@@ -471,6 +500,14 @@ def _divide(self, x1, x2, out):
         """
         raise NotImplementedError('abstract method')
 
+    @property
+    def default_order(self):
+        """Default storage order for new elements in this space.
+
+        This property should be overridden by subclasses.
+        """
+        raise NotImplementedError('abstract method')
+
     @staticmethod
     def default_dtype(field=None):
         """Return the default data type for a given field.
@@ -566,6 +603,14 @@ def __setitem__(self, indices, values):
         """
         raise NotImplementedError('abstract method')
 
+    @property
+    def data(self):
+        """The data structure holding this element's entries.
+
+        This property should be overridden by subclasses.
+        """
+        raise NotImplementedError('abstract method')
+
     @property
     def impl(self):
         """Name of the implementation back-end of this tensor."""
@@ -807,11 +852,11 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
         # --- Get some parameters for later --- #
 
         # Arguments for `writable_array` and/or space constructors
-        out_dtype = kwargs.get('dtype', None)
-        if out_dtype is None:
+        dtype_out = kwargs.get('dtype', None)
+        if dtype_out is None:
             array_kwargs = {}
         else:
-            array_kwargs = {'dtype': out_dtype}
+            array_kwargs = {'dtype': dtype_out}
 
         # --- Evaluate ufunc --- #
 
diff --git a/odl/space/fspace.py b/odl/space/fspace.py
index dfbe9042141..05096133db5 100644
--- a/odl/space/fspace.py
+++ b/odl/space/fspace.py
@@ -21,7 +21,8 @@
     is_real_dtype, is_complex_floating_dtype, dtype_repr, dtype_str,
     complex_dtype, real_dtype, signature_string, is_real_floating_dtype,
     is_valid_input_array, is_valid_input_meshgrid,
-    out_shape_from_array, out_shape_from_meshgrid, vectorize, writable_array)
+    out_shape_from_array, out_shape_from_meshgrid, vectorize,
+    writable_array, is_int, normalized_axis_indices)
 from odl.util.utility import preload_first_arg, getargspec
 
 
@@ -97,8 +98,8 @@ def _default_in_place(func, x, out, **kwargs):
         # New array that is flat in the `out_shape` axes, reshape it
         # to the final `out_shape + scalar_shape`, using the same
         # order ('C') as the initial `result.ravel()`.
-        result = np.array(bcast_results, dtype=func.scalar_out_dtype)
-        result = result.reshape(func.out_shape + scalar_out_shape)
+        result = np.array(bcast_results, dtype=func.scalar_dtype_out)
+        result = result.reshape(func.shape_out + scalar_out_shape)
 
     # The following code is required to remove extra axes, e.g., when
     # the result has shape (2, 1, 3) but should have shape (2, 3).
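[Reviewer note] Since the `out_dtype` -> `dtype_out` rename below touches many call sites, here is a minimal doctest-style sketch of the renamed vector-valued `FunctionSpace` API, assuming this branch; the output values mirror the `element` doctest further down.

>>> import numpy as np
>>> import odl
>>> fspace = odl.FunctionSpace(odl.IntervalProd(0, 1),
...                            dtype_out=(float, (2,)))
>>> fspace.shape_out
(2,)
>>> f = fspace.element([lambda x: x + 1, np.negative])
>>> f(0.5)
array([ 1.5, -0.5])
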
@@ -124,11 +125,11 @@ def _default_out_of_place(func, x, **kwargs): raise TypeError('cannot use in-place method to implement ' 'out-of-place non-vectorized evaluation') - dtype = func.space.scalar_out_dtype + dtype = func.space.scalar_dtype_out if dtype is None: dtype = np.result_type(*x) - out_shape = func.out_shape + scalar_out_shape + out_shape = func.shape_out + scalar_out_shape out = np.empty(out_shape, dtype=dtype) func._call_in_place(x, out=out, **kwargs) return out @@ -177,14 +178,14 @@ class FunctionSpace(LinearSpace): for details. """ - def __init__(self, domain, out_dtype=float): + def __init__(self, domain, dtype_out=float): """Initialize a new instance. Parameters ---------- domain : `Set` The domain of the functions. - out_dtype : optional + dtype_out : optional Data type of the return value of a function in this space. Can be provided in any way the `numpy.dtype` constructor understands, e.g. as built-in type or as a string. @@ -205,17 +206,17 @@ def __init__(self, domain, out_dtype=float): FunctionSpace(IntervalProd(0.0, 1.0)) Complex-valued functions on the same domain can be created by - specifying ``out_dtype``: + specifying ``dtype_out``: - >>> odl.FunctionSpace(domain, out_dtype=complex) - FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=complex) + >>> odl.FunctionSpace(domain, dtype_out=complex) + FunctionSpace(IntervalProd(0.0, 1.0), dtype_out=complex) To get vector- or tensor-valued functions, specify - ``out_dtype`` with shape: + ``dtype_out`` with shape: >>> vec_dtype = np.dtype((float, (3,))) # 3 components - >>> odl.FunctionSpace(domain, out_dtype=vec_dtype) - FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3,))) + >>> odl.FunctionSpace(domain, dtype_out=vec_dtype) + FunctionSpace(IntervalProd(0.0, 1.0), dtype_out=('float64', (3,))) """ if not isinstance(domain, Set): raise TypeError('`domain` must be a `Set` instance, got {!r}' @@ -223,14 +224,14 @@ def __init__(self, domain, out_dtype=float): self.__domain = domain # Prevent None from being converted to float64 by np.dtype - if out_dtype is None: - self.__out_dtype = None + if dtype_out is None: + self.__dtype_out = None else: - self.__out_dtype = np.dtype(out_dtype) + self.__dtype_out = np.dtype(dtype_out) - if is_real_dtype(self.out_dtype): + if is_real_dtype(self.dtype_out): field = RealNumbers() - elif is_complex_floating_dtype(self.out_dtype): + elif is_complex_floating_dtype(self.dtype_out): field = ComplexNumbers() else: field = None @@ -239,20 +240,20 @@ def __init__(self, domain, out_dtype=float): # Init cache attributes for real / complex variants if self.field == RealNumbers(): - self.__real_out_dtype = self.out_dtype + self.__real_dtype_out = self.dtype_out self.__real_space = self - self.__complex_out_dtype = complex_dtype(self.out_dtype, + self.__complex_dtype_out = complex_dtype(self.dtype_out, default=np.dtype(object)) self.__complex_space = None elif self.field == ComplexNumbers(): - self.__real_out_dtype = real_dtype(self.out_dtype) + self.__real_dtype_out = real_dtype(self.dtype_out) self.__real_space = None - self.__complex_out_dtype = self.out_dtype + self.__complex_dtype_out = self.dtype_out self.__complex_space = self else: - self.__real_out_dtype = None + self.__real_dtype_out = None self.__real_space = None - self.__complex_out_dtype = None + self.__complex_dtype_out = None self.__complex_space = None @property @@ -261,68 +262,68 @@ def domain(self): return self.__domain @property - def out_dtype(self): + def dtype_out(self): """Output data type (including shape) of a 
function in this space.

        If ``None``, the output data type is not pre-defined and instead
        inferred at run-time.
        """
-        return self.__out_dtype
+        return self.__dtype_out

    @property
-    def scalar_out_dtype(self):
-        """Scalar variant of ``out_dtype`` in case it has a shape."""
-        return getattr(self.out_dtype, 'base', None)
+    def scalar_dtype_out(self):
+        """Scalar variant of ``dtype_out`` in case it has a shape."""
+        return getattr(self.dtype_out, 'base', None)

    @property
-    def real_out_dtype(self):
-        """The real dtype corresponding to this space's `out_dtype`."""
-        if self.__real_out_dtype is None:
+    def real_dtype_out(self):
+        """The real dtype corresponding to this space's `dtype_out`."""
+        if self.__real_dtype_out is None:
            raise AttributeError(
                'no real variant of output dtype {} defined'
-                ''.format(dtype_repr(self.scalar_out_dtype)))
+                ''.format(dtype_repr(self.scalar_dtype_out)))
        else:
-            return self.__real_out_dtype
+            return self.__real_dtype_out

    @property
-    def complex_out_dtype(self):
-        """The complex dtype corresponding to this space's `out_dtype`."""
-        if self.__complex_out_dtype is None:
+    def complex_dtype_out(self):
+        """The complex dtype corresponding to this space's `dtype_out`."""
+        if self.__complex_dtype_out is None:
            raise AttributeError(
                'no complex variant of output dtype {} defined'
-                ''.format(dtype_repr(self.scalar_out_dtype)))
+                ''.format(dtype_repr(self.scalar_dtype_out)))
        else:
-            return self.__complex_out_dtype
+            return self.__complex_dtype_out

    @property
    def is_real(self):
        """True if this is a space of real-valued functions."""
-        return is_real_floating_dtype(self.scalar_out_dtype)
+        return is_real_floating_dtype(self.scalar_dtype_out)

    @property
    def is_complex(self):
        """True if this is a space of complex-valued functions."""
-        return is_complex_floating_dtype(self.scalar_out_dtype)
+        return is_complex_floating_dtype(self.scalar_dtype_out)

    @property
-    def out_shape(self):
+    def shape_out(self):
        """Shape of function values, ``()`` for scalar output."""
-        return getattr(self.out_dtype, 'shape', ())
+        return getattr(self.dtype_out, 'shape', ())

    @property
    def tensor_valued(self):
        """``True`` if functions have multi-dim. output, else ``False``."""
-        return (self.out_shape != ())
+        return (self.shape_out != ())

    @property
    def real_space(self):
        """The space corresponding to this space's `real_dtype_out`."""
-        return self.astype(self.real_out_dtype)
+        return self.astype(self.real_dtype_out)

    @property
    def complex_space(self):
        """The space corresponding to this space's `complex_dtype_out`."""
-        return self.astype(self.complex_out_dtype)
+        return self.astype(self.complex_dtype_out)

    def element(self, fcall=None, vectorized=True):
        """Create a `FunctionSpace` element.
@@ -378,7 +379,7 @@ def element(self, fcall=None, vectorized=True):

        >>> # Space of vector-valued functions with 2 components
        >>> fspace = odl.FunctionSpace(odl.IntervalProd(0, 1),
-        ...                            out_dtype=(float, (2,)))
+        ...                            dtype_out=(float, (2,)))
        >>> # Possibility 1: provide component functions
        >>> func1 = fspace.element([lambda x: x + 1, np.negative])
        >>> func1(0.5)
@@ -427,7 +428,7 @@ def element(self, fcall=None, vectorized=True):
        domains work analogously:

        >>> fspace = odl.FunctionSpace(odl.IntervalProd([0, 0], [1, 1]),
-        ...                            out_dtype=(float, (2, 3)))
+        ...                            dtype_out=(float, (2, 3)))
        >>> def pyfunc(x):
        ...     return [[x[0], x[1], x[0] + x[1]],
        ...             
[1, 0, 2 * (x[0] + x[1])]] @@ -478,7 +479,7 @@ def element(self, fcall=None, vectorized=True): raise TypeError('non-vectorized `fcall` with `out` ' 'parameter not allowed') if self.field is not None: - otypes = [self.scalar_out_dtype] + otypes = [self.scalar_dtype_out] else: otypes = [] @@ -487,11 +488,11 @@ def element(self, fcall=None, vectorized=True): else: # This is for the case that an array-like of callables # is provided - if np.shape(fcall) != self.out_shape: + if np.shape(fcall) != self.shape_out: raise ValueError( 'invalid `fcall` {!r}: expected `None`, a callable or ' 'an array-like of callables whose shape matches ' - '`out_shape` {}'.format(self.out_shape)) + '`shape_out` {}'.format(self.shape_out)) fcalls = np.array(fcall, dtype=object, ndmin=1).ravel().tolist() if not vectorized: @@ -516,7 +517,7 @@ def wrapper(x, out=None, **kwargs): a list, handling all kinds of sequence entries (normal function, ufunc, constant, etc.). 2. Broadcast all results to the desired shape that is - determined by the space's ``out_shape`` and the + determined by the space's ``shape_out`` and the shape(s) of the input. 3. Form a big array containing the final result. @@ -562,7 +563,7 @@ def wrapper(x, out=None, **kwargs): else: if has_out: out = np.empty(scalar_out_shape, - dtype=self.scalar_out_dtype) + dtype=self.scalar_dtype_out) f(x, out=out, **kwargs) results.append(out) else: @@ -583,9 +584,9 @@ def wrapper(x, out=None, **kwargs): bcast_results.append(reshaped) out_arr = np.array(bcast_results, - dtype=self.scalar_out_dtype) + dtype=self.scalar_dtype_out) - return out_arr.reshape(self.out_shape + scalar_out_shape) + return out_arr.reshape(self.shape_out + scalar_out_shape) else: # In-place evaluation @@ -623,14 +624,14 @@ def zero_vec(x, out=None, **kwargs): raise TypeError('invalid input type') # For tensor-valued functions - out_shape = self.out_shape + scalar_out_shape + out_shape = self.shape_out + scalar_out_shape if out is None: - return np.zeros(out_shape, dtype=self.scalar_out_dtype) + return np.zeros(out_shape, dtype=self.scalar_dtype_out) else: # Need to go through an array to fill with the correct # zero value for all dtypes - fill_value = np.zeros(1, dtype=self.scalar_out_dtype)[0] + fill_value = np.zeros(1, dtype=self.scalar_dtype_out)[0] out.fill(fill_value) return self.element_type(self, zero_vec) @@ -647,12 +648,12 @@ def one_vec(x, out=None, **kwargs): else: raise TypeError('invalid input type') - out_shape = self.out_shape + scalar_out_shape + out_shape = self.shape_out + scalar_out_shape if out is None: - return np.ones(out_shape, dtype=self.scalar_out_dtype) + return np.ones(out_shape, dtype=self.scalar_dtype_out) else: - fill_value = np.ones(1, dtype=self.scalar_out_dtype)[0] + fill_value = np.ones(1, dtype=self.scalar_dtype_out)[0] out.fill(fill_value) return self.element_type(self, one_vec) @@ -665,18 +666,18 @@ def __eq__(self, other): equals : bool ``True`` if ``other`` is a `FunctionSpace` with same `FunctionSpace.domain`, `FunctionSpace.field` and - `FunctionSpace.out_dtype`, ``False`` otherwise. + `FunctionSpace.dtype_out`, ``False`` otherwise. """ if other is self: return True return (type(other) is type(self) and self.domain == other.domain and - self.out_dtype == other.out_dtype) + self.dtype_out == other.dtype_out) def __hash__(self): """Return ``hash(self)``.""" - return hash((type(self), self.domain, self.out_dtype)) + return hash((type(self), self.domain, self.dtype_out)) def __contains__(self, other): """Return ``other in self``. 
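(Illustration of the equality semantics above, a minimal sketch using only
constructs from this module; ``float`` is the default ``dtype_out``:

    >>> dom = odl.IntervalProd(0, 1)
    >>> odl.FunctionSpace(dom) == odl.FunctionSpace(dom, dtype_out=float)
    True
    >>> odl.FunctionSpace(dom) == odl.FunctionSpace(dom, dtype_out=complex)
    False
)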
@@ -691,16 +692,16 @@ def __contains__(self, other): return (isinstance(other, self.element_type) and other.space == self) - def _astype(self, out_dtype): + def _astype(self, dtype_out): """Internal helper for ``astype``.""" - return type(self)(self.domain, out_dtype=out_dtype) + return type(self)(self.domain, dtype_out=dtype_out) - def astype(self, out_dtype): - """Return a copy of this space with new ``out_dtype``. + def astype(self, dtype_out): + """Return a copy of this space with new ``dtype_out``. Parameters ---------- - out_dtype : + dtype_out : Output data type of the returned space. Can be given in any way `numpy.dtype` understands, e.g. as string (``'complex64'``) or built-in type (``complex``). ``None`` is interpreted as @@ -711,27 +712,27 @@ def astype(self, out_dtype): newspace : `FunctionSpace` The version of this space with given data type """ - out_dtype = np.dtype(out_dtype) - if out_dtype == self.out_dtype: + dtype_out = np.dtype(dtype_out) + if dtype_out == self.dtype_out: return self # Try to use caching for real and complex versions (exact dtype # mappings). This may fail for certain dtype, in which case we # just go to `_astype` directly. - real_dtype = getattr(self, 'real_out_dtype', None) + real_dtype = getattr(self, 'real_dtype_out', None) if real_dtype is None: - return self._astype(out_dtype) + return self._astype(dtype_out) else: - if out_dtype == real_dtype: + if dtype_out == real_dtype: if self.__real_space is None: - self.__real_space = self._astype(out_dtype) + self.__real_space = self._astype(dtype_out) return self.__real_space - elif out_dtype == self.complex_out_dtype: + elif dtype_out == self.complex_dtype_out: if self.__complex_space is None: - self.__complex_space = self._astype(out_dtype) + self.__complex_space = self._astype(dtype_out) return self.__complex_space else: - return self._astype(out_dtype) + return self._astype(dtype_out) def _lincomb(self, a, f1, b, f2, out): """Linear combination of ``f1`` and ``f2``. 
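(The caching branch above can be exercised directly; identical requests
return the identical cached space, an illustrative sketch:

    >>> fspace = odl.FunctionSpace(odl.IntervalProd(0, 1))
    >>> fspace.astype(float) is fspace
    True
    >>> fspace.astype(complex) is fspace.astype(complex)
    True
)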
@@ -750,9 +751,9 @@ def lincomb_oop(x, **kwargs): # Not optimized since that raises issues with alignment # of input and partial results out = a * np.asarray(f1_copy(x, **kwargs), - dtype=self.scalar_out_dtype) + dtype=self.scalar_dtype_out) tmp = b * np.asarray(f2_copy(x, **kwargs), - dtype=self.scalar_out_dtype) + dtype=self.scalar_dtype_out) out += tmp return out @@ -777,7 +778,7 @@ def _multiply(self, f1, f2, out): def product_oop(x, **kwargs): """Product out-of-place evaluation function.""" return np.asarray(f1_copy(x, **kwargs) * f2_copy(x, **kwargs), - dtype=self.scalar_out_dtype) + dtype=self.scalar_dtype_out) out._call_out_of_place = product_oop decorator = preload_first_arg(out, 'in-place') @@ -800,7 +801,7 @@ def _divide(self, f1, f2, out): def quotient_oop(x, **kwargs): """Quotient out-of-place evaluation function.""" return np.asarray(f1_copy(x, **kwargs) / f2_copy(x, **kwargs), - dtype=self.scalar_out_dtype) + dtype=self.scalar_dtype_out) out._call_out_of_place = quotient_oop decorator = preload_first_arg(out, 'in-place') @@ -841,10 +842,10 @@ def power_oop(x, **kwargs): return self.one() elif p == int(p) and p >= 1: return np.asarray(pow_posint(f_copy(x, **kwargs), int(p)), - dtype=self.scalar_out_dtype) + dtype=self.scalar_dtype_out) else: result = np.power(f_copy(x, **kwargs), p) - return result.astype(self.scalar_out_dtype) + return result.astype(self.scalar_dtype_out) out._call_out_of_place = power_oop decorator = preload_first_arg(out, 'in-place') @@ -856,10 +857,10 @@ def _realpart(self, f): """Function returning the real part of the result from ``f``.""" def f_re(x, **kwargs): result = np.asarray(f(x, **kwargs), - dtype=self.scalar_out_dtype) + dtype=self.scalar_dtype_out) return result.real - if is_real_dtype(self.out_dtype): + if is_real_dtype(self.dtype_out): return f else: return self.real_space.element(f_re) @@ -868,10 +869,10 @@ def _imagpart(self, f): """Function returning the imaginary part of the result from ``f``.""" def f_im(x, **kwargs): result = np.asarray(f(x, **kwargs), - dtype=self.scalar_out_dtype) + dtype=self.scalar_dtype_out) return result.imag - if is_real_dtype(self.out_dtype): + if is_real_dtype(self.dtype_out): return self.zero() else: return self.real_space.element(f_im) @@ -880,37 +881,116 @@ def _conj(self, f): """Function returning the complex conjugate of a result.""" def f_conj(x, **kwargs): result = np.asarray(f(x, **kwargs), - dtype=self.scalar_out_dtype) + dtype=self.scalar_dtype_out) return result.conj() - if is_real_dtype(self.out_dtype): + if is_real_dtype(self.dtype_out): return f else: return self.element(f_conj) + @property + def byaxis(self): + """Object to index along output and input dimensions. + + .. note:: + - Output dimensions come first (function components), then + input dimensions (function domain). + - If the index expression does not contain an index from the + "in" or "out" parts, the result will have no axes in that part. + For output dimensions, this means that the resulting space will + be scalar-valued, for input dimensions, the domain will be + empty, which is usually not desired. + - If a list of integers is given, the respective axes are + "stacked" as desired. However, it is not possible to mix + output with input axes. 
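+
+            For instance, with two output axes, ``byaxis[[0, 2, 1]]``
+            interleaves input axis 2 between the output axes and is
+            rejected with a ``ValueError``, while ``byaxis[[1, 0, 2]]``
+            keeps the two groups separate and is allowed.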
+ + See Also + -------- + byaxis_out : index along output axes only + byaxis_in : index along input axes only + + Examples + -------- + Indexing with integers or slices: + + >>> domain = odl.IntervalProd(0, 1) + >>> fspace = odl.FunctionSpace(domain, dtype_out=(float, (2, 3, 4))) + >>> fspace.byaxis[0] + FunctionSpace(IntervalProd([], []), dtype_out=('float64', (2,))) + >>> fspace.byaxis[0, -1] + FunctionSpace(IntervalProd(0.0, 1.0), dtype_out=('float64', (2,))) + >>> fspace.byaxis[1:] + FunctionSpace(IntervalProd(0.0, 1.0), dtype_out=('float64', (3, 4))) + """ + space = self + + class FspaceByaxis(object): + + """Helper class for indexing by axes.""" + + def __getitem__(self, indices): + """Return ``self[indices]``. + + Parameters + ---------- + indices : index expression + Object used to select axes. This can be either an int, + slice or sequence of integers. + + Returns + ------- + space : `FunctionSpace` + The resulting space with roughly the same properties. + """ + ndim_out = len(space.shape_out) + ndim = ndim_out + space.domain.ndim + indices, indices_in = (normalized_axis_indices(indices, ndim), + indices) + if _out_in_indices_overlap(indices, ndim_out): + raise ValueError( + '`indices` {} has overlapping output and input ' + 'axes'.format(indices_in)) + + idcs_out = [i for i in indices if i < ndim_out] + idcs_in = [i - ndim_out for i in indices if i >= ndim_out] + + domain = space.domain[idcs_in] # this slices by axis + shape_out = tuple(space.shape_out[i] for i in idcs_out) + dtype_out = (space.scalar_dtype_out, shape_out) + return FunctionSpace(domain, dtype_out) + + def __repr__(self): + """Return ``repr(self)``.""" + return repr(space) + '.byaxis' + + return FspaceByaxis() + @property def byaxis_out(self): """Object to index along output dimensions. - This is only valid for non-trivial `out_shape`. - Examples -------- Indexing with integers or slices: >>> domain = odl.IntervalProd(0, 1) - >>> fspace = odl.FunctionSpace(domain, out_dtype=(float, (2, 3, 4))) + >>> fspace = odl.FunctionSpace(domain, dtype_out=(float, (2, 3, 4))) >>> fspace.byaxis_out[0] - FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (2,))) + FunctionSpace(IntervalProd(0.0, 1.0), dtype_out=('float64', (2,))) >>> fspace.byaxis_out[1] - FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3,))) + FunctionSpace(IntervalProd(0.0, 1.0), dtype_out=('float64', (3,))) >>> fspace.byaxis_out[1:] - FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3, 4))) + FunctionSpace(IntervalProd(0.0, 1.0), dtype_out=('float64', (3, 4))) Lists can be used to stack spaces arbitrarily: >>> fspace.byaxis_out[[2, 1, 2]] - FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (4, 3, 4))) + FunctionSpace(IntervalProd(0.0, 1.0), dtype_out=('float64', (4, 3, 4))) + + See Also + -------- + byaxis_in : index along input axes """ space = self @@ -931,21 +1011,12 @@ def __getitem__(self, indices): space : `FunctionSpace` The resulting space with same domain and scalar output data type, but indexed output components. - - Raises - ------ - IndexError - If this is a space of scalar-valued functions. 
""" - try: - iter(indices) - except TypeError: - newshape = space.out_shape[indices] - else: - newshape = tuple(space.out_shape[int(i)] for i in indices) - - dtype = (space.scalar_out_dtype, newshape) - return FunctionSpace(space.domain, out_dtype=dtype) + ndim_out = len(space.shape_out) + ndim = ndim_out + space.domain.ndim + idcs_out = normalized_axis_indices(indices, ndim_out) + idcs_in = tuple(range(ndim_out, ndim)) + return space.byaxis[idcs_out + idcs_in] def __repr__(self): """Return ``repr(self)``.""" @@ -974,6 +1045,10 @@ def byaxis_in(self): >>> fspace.byaxis_in[[2, 1, 2]] FunctionSpace(IntervalProd([ 0., 0., 0.], [ 3., 2., 3.])) + + See Also + -------- + byaxis_out : index along output axes """ space = self @@ -996,7 +1071,7 @@ def __getitem__(self, indices): indexed domain. """ domain = space.domain[indices] - return FunctionSpace(domain, out_dtype=space.out_dtype) + return FunctionSpace(domain, dtype_out=space.dtype_out) def __repr__(self): """Return ``repr(self)``.""" @@ -1087,9 +1162,9 @@ def element_type(self): def __repr__(self): """Return ``repr(self)``.""" posargs = [self.domain] - optargs = [('out_dtype', dtype_str(self.out_dtype), 'float')] + optargs = [('dtype_out', dtype_str(self.dtype_out), 'float')] if (self.tensor_valued or - self.scalar_out_dtype in (float, complex, int, bool)): + self.scalar_dtype_out in (float, complex, int, bool)): optmod = '!s' else: optmod = '' @@ -1142,22 +1217,22 @@ def domain(self): return self.space.domain @property - def out_dtype(self): + def dtype_out(self): """Output data type of this function. If ``None``, the output data type is not uniquely pre-defined. """ - return self.space.out_dtype + return self.space.dtype_out @property - def scalar_out_dtype(self): - """Scalar variant of ``out_dtype`` in case it has a shape.""" - return self.space.scalar_out_dtype + def scalar_dtype_out(self): + """Scalar variant of ``dtype_out`` in case it has a shape.""" + return self.space.scalar_dtype_out @property - def out_shape(self): + def shape_out(self): """Shape of function values, ``()`` for scalar output.""" - return self.space.out_shape + return self.space.shape_out @property def tensor_valued(self): @@ -1301,9 +1376,9 @@ def __call__(self, x, out=None, **kwargs): 'the domain {}'.format(self.domain)) if scalar_in: - out_shape = self.out_shape + out_shape = self.shape_out else: - out_shape = self.out_shape + scalar_out_shape + out_shape = self.shape_out + scalar_out_shape # Call the function and check out shape, before or after if out is None: @@ -1330,7 +1405,7 @@ def __call__(self, x, out=None, **kwargs): if isinstance(out, np.ndarray) or np.isscalar(out): # Cast to proper dtype if needed, also convert to array if out # is a scalar. - out = np.asarray(out, dtype=self.space.scalar_out_dtype) + out = np.asarray(out, dtype=self.space.scalar_dtype_out) if scalar_in: out = np.squeeze(out) elif ndim == 1 and out.shape == (1,) + out_shape: @@ -1346,8 +1421,14 @@ def __call__(self, x, out=None, **kwargs): elif self.space.tensor_valued: # The out object can be any array-like of objects with shapes - # that should all be broadcastable to scalar_out_shape. - results = np.array(out) + # that should all be broadcastable to scalar_out_shape + try: + results = np.array(out) + except ValueError: + # For some broadcasting sitations, the above call fails. 
+                    # We then need to use `object` explicitly
+                    results = np.array(out, dtype=object)
+
             if results.dtype == object or scalar_in:
                 # Some results don't have correct shape, need to
                 # broadcast
@@ -1366,13 +1447,13 @@ def __call__(self, x, out=None, **kwargs):
                         np.broadcast_to(res, scalar_out_shape))

                 out_arr = np.array(bcast_res,
-                                   dtype=self.space.scalar_out_dtype)
-            elif (self.scalar_out_dtype is not None and
-                  results.dtype != self.scalar_out_dtype):
+                                   dtype=self.space.scalar_dtype_out)
+            elif (self.scalar_dtype_out is not None and
+                  results.dtype != self.scalar_dtype_out):
                 raise ValueError(
                     'result is of dtype {}, expected {}'
                     ''.format(dtype_repr(results.dtype),
-                              dtype_repr(self.space.scalar_out_dtype)))
+                              dtype_repr(self.space.scalar_dtype_out)))
             else:
                 out_arr = results

@@ -1390,10 +1471,10 @@ def __call__(self, x, out=None, **kwargs):
                 raise ValueError('output shape {} not equal to shape '
                                  '{} expected from input'
                                  ''.format(out.shape, out_shape))
-            if (self.out_dtype is not None and
-                    out.dtype != self.scalar_out_dtype):
-                raise ValueError('`out.dtype` ({}) does not match out_dtype '
-                                 '({})'.format(out.dtype, self.out_dtype))
+            if (self.dtype_out is not None and
+                    out.dtype != self.scalar_dtype_out):
+                raise ValueError('`out.dtype` ({}) does not match dtype_out '
+                                 '({})'.format(out.dtype, self.dtype_out))

         if ndim == 1 and not self.tensor_valued:
             # TypeError for meshgrid in 1d, but expected array (see above)
@@ -1516,7 +1597,7 @@ def __str__(self):
         # Try to get a pretty-print name of the function
         fname = getattr(func, '__name__', getattr(func, 'name', str(func)))

-        return '{}: {} --> {}'.format(fname, self.domain, self.out_dtype)
+        return '{}: {} --> {}'.format(fname, self.domain, self.dtype_out)

     def __repr__(self):
         """Return ``repr(self)``."""
@@ -1528,6 +1609,28 @@ def __repr__(self):
         return '{!r}.element({!r})'.format(self.space, func)


+def _out_in_indices_overlap(indices, ndim_out):
+    """Return whether index parts overlap for per-axis indexing of a space.
+
+    This function is intended for checking an error condition in
+    per-axis indexing of tensor-valued spaces. The error condition is that
+    indices are given as a list, and input and output axes are mixed.
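+
+    Examples
+    --------
+    With two output axes, an input index appearing between output
+    indices counts as overlap:
+
+    >>> _out_in_indices_overlap([0, 2, 1], ndim_out=2)
+    True
+    >>> _out_in_indices_overlap([1, 0, 2], ndim_out=2)
+    False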
+ """ + if not indices: + return False + + out_idcs = [i for i in indices if i < ndim_out] + in_idcs = [i for i in indices if i >= ndim_out] + if not out_idcs or not in_idcs: + return False + + outmax = max(out_idcs) + outmax_idx = len(indices) - 1 - indices[::-1].index(outmax) + inmin = min(in_idcs) + inmin_idx = indices.index(inmin) + return inmin_idx < outmax_idx + + if __name__ == '__main__': from odl.util.testutils import run_doctests run_doctests() diff --git a/odl/space/npy_tensors.py b/odl/space/npy_tensors.py index 78f680b6b22..99664d42cf5 100644 --- a/odl/space/npy_tensors.py +++ b/odl/space/npy_tensors.py @@ -9,8 +9,9 @@ """NumPy implementation of tensor spaces.""" from __future__ import print_function, division, absolute_import -from future.utils import native from builtins import object +from future.utils import native +from future.moves.itertools import zip_longest import ctypes from functools import partial import numpy as np @@ -19,11 +20,12 @@ from odl.set.space import LinearSpaceTypeError from odl.space.base_tensors import TensorSpace, Tensor from odl.space.weighting import ( - Weighting, ArrayWeighting, ConstWeighting, + Weighting, ArrayWeighting, ConstWeighting, PerAxisWeighting, CustomInner, CustomNorm, CustomDist) from odl.util import ( - dtype_str, signature_string, is_real_dtype, is_numeric_dtype, - writable_array, is_floating_dtype) + dtype_str, signature_string, is_real_dtype, is_numeric_dtype, array_str, + array_hash, indent, fast_1d_tensor_mult, writable_array, is_floating_dtype, + simulate_slicing, normalized_index_expression) __all__ = ('NumpyTensorSpace',) @@ -74,7 +76,7 @@ class NumpyTensorSpace(TensorSpace): """ def __init__(self, shape, dtype=None, **kwargs): - """Initialize a new instance. + r"""Initialize a new instance. Parameters ---------- @@ -90,8 +92,8 @@ def __init__(self, shape, dtype=None, **kwargs): Exponent of the norm. For values other than 2.0, no inner product is defined. - This option has no impact if either ``dist``, ``norm`` or - ``inner`` is given, or if ``dtype`` is non-numeric. + This option has no impact if either of ``weighting``, ``dist``, + ``norm`` or ``inner`` is given, or if ``dtype`` is non-numeric. Default: 2.0 @@ -101,14 +103,15 @@ def __init__(self, shape, dtype=None, **kwargs): Use weighted inner product, norm, and dist. The following types are supported as ``weighting``: - ``None``: no weighting, i.e. weighting with ``1.0`` (default). - - `Weighting`: Use this weighting as-is. Compatibility - with this space's elements is not checked during init. - - ``float``: Weighting by a constant. - - array-like: Pointwise weighting by an array. + - ``None``: No weighting, i.e. weighting with ``1.0`` for + each axis (default). + - ``float``: Weighting by a constant. + - sequence of length ``ndim``: Separate weighting per axis. + Entries can be constants or 1D arrays. + - array-like: Pointwise weighting by an array of the same + ``shape`` as this space. + - `Weighting`: Use this weighting as-is. Compatibility + with this space is not checked during init. This option cannot be combined with ``dist``, ``norm`` or ``inner``. 
It also cannot be used in case of @@ -160,41 +163,41 @@ def __init__(self, shape, dtype=None, **kwargs): Notes ----- - - A distance function or metric on a space :math:`\mathcal{X}` + - A distance function or metric on a space :math:`X` is a mapping - :math:`d:\mathcal{X} \\times \mathcal{X} \\to \mathbb{R}` + :math:`d:X \times X \to \mathbb{R}` satisfying the following conditions for all space elements :math:`x, y, z`: * :math:`d(x, y) \geq 0`, * :math:`d(x, y) = 0 \Leftrightarrow x = y`, * :math:`d(x, y) = d(y, x)`, - * :math:`d(x, y) \\leq d(x, z) + d(z, y)`. + * :math:`d(x, y) \leq d(x, z) + d(z, y)`. - - A norm on a space :math:`\mathcal{X}` is a mapping - :math:`\| \cdot \|:\mathcal{X} \\to \mathbb{R}` + - A norm on a space :math:`X` is a mapping + :math:`\| \cdot \|:X \to \mathbb{R}` satisfying the following conditions for all space elements :math:`x, y`: and scalars :math:`s`: * :math:`\| x\| \geq 0`, * :math:`\| x\| = 0 \Leftrightarrow x = 0`, * :math:`\| sx\| = |s| \cdot \| x \|`, - * :math:`\| x+y\| \\leq \| x\| + + * :math:`\| x+y\| \leq \| x\| + \| y\|`. - - An inner product on a space :math:`\mathcal{X}` over a field + - An inner product on a space :math:`X` over a field :math:`\mathbb{F} = \mathbb{R}` or :math:`\mathbb{C}` is a mapping - :math:`\\langle\cdot, \cdot\\rangle: \mathcal{X} \\times - \mathcal{X} \\to \mathbb{F}` + :math:`\langle\cdot, \cdot\rangle: X \times + X \to \mathbb{F}` satisfying the following conditions for all space elements :math:`x, y, z`: and scalars :math:`s`: - * :math:`\\langle x, y\\rangle = - \overline{\\langle y, x\\rangle}`, - * :math:`\\langle sx + y, z\\rangle = s \\langle x, z\\rangle + - \\langle y, z\\rangle`, - * :math:`\\langle x, x\\rangle = 0 \Leftrightarrow x = 0`. + * :math:`\langle x, y\rangle = + \overline{\langle y, x\rangle}`, + * :math:`\langle sx + y, z\rangle = s \langle x, z\rangle + + \langle y, z\rangle`, + * :math:`\langle x, x\rangle = 0 \Leftrightarrow x = 0`. 
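+
+        - The default weighting is a per-axis weighting with constant
+          factor 1.0 in each axis; for it, the weighted inner product,
+          norm and distance coincide with the unweighted ones.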
Examples -------- @@ -218,7 +221,9 @@ def __init__(self, shape, dtype=None, **kwargs): tensor_space((2, 3), dtype=int) """ super(NumpyTensorSpace, self).__init__(shape, dtype) - if self.dtype.char not in self.available_dtypes(): + # Check if dtype is supported; to include variable-size dtypes + # (mostly string types) we check `dtype.char` + if np.dtype(self.dtype.char) not in self.available_dtypes(): raise ValueError('`dtype` {!r} not supported' ''.format(dtype_str(dtype))) @@ -245,6 +250,7 @@ def __init__(self, shape, dtype=None, **kwargs): # Set the weighting if weighting is not None: + if isinstance(weighting, Weighting): if weighting.impl != 'numpy': raise ValueError("`weighting.impl` must be 'numpy', " @@ -254,25 +260,51 @@ def __init__(self, shape, dtype=None, **kwargs): '`exponent`: {} != {}' ''.format(weighting.exponent, exponent)) self.__weighting = weighting + + elif np.isscalar(weighting): + if self.ndim == 1: + # Prefer per-axis weighting if possible, it behaves better + self.__weighting = NumpyTensorSpacePerAxisWeighting( + [weighting], exponent) + else: + self.__weighting = NumpyTensorSpaceConstWeighting( + weighting, exponent) + + elif len(weighting) == self.ndim: + self.__weighting = NumpyTensorSpacePerAxisWeighting( + weighting, exponent) + else: - self.__weighting = _weighting(weighting, exponent) + array = np.asarray(weighting) + if array.dtype == object: + raise ValueError( + 'invalid `weighting` provided; valid inputs are ' + '`None`, constants, sequences of length `ndim` ' + "and array-like objects of this space's `shape`") + self.__weighting = NumpyTensorSpaceArrayWeighting( + array, exponent) # Check (afterwards) that the weighting input was sane - if isinstance(self.weighting, NumpyTensorSpaceArrayWeighting): - if self.weighting.array.dtype == object: - raise ValueError('invalid `weighting` argument: {}' - ''.format(weighting)) - elif not np.can_cast(self.weighting.array.dtype, self.dtype): + if isinstance(self.__weighting, NumpyTensorSpaceArrayWeighting): + if not np.can_cast(self.__weighting.array.dtype, self.dtype): raise ValueError( 'cannot cast from `weighting` data type {} to ' 'the space `dtype` {}' - ''.format(dtype_str(self.weighting.array.dtype), + ''.format(dtype_str(self.__weighting.array.dtype), dtype_str(self.dtype))) - if self.weighting.array.shape != self.shape: + if self.__weighting.array.shape != self.shape: raise ValueError('array-like weights must have same ' 'shape {} as this space, got {}' ''.format(self.shape, - self.weighting.array.shape)) + self.__weighting.array.shape)) + + elif isinstance(self.__weighting, + NumpyTensorSpacePerAxisWeighting): + if len(self.__weighting.factors) != self.ndim: + raise ValueError( + 'per-axis weighting must have {} (=ndim) factors, ' + 'got {}'.format(self.ndim, + len(self.__weighting.factors))) elif dist is not None: self.__weighting = NumpyTensorSpaceCustomDist(dist) @@ -281,8 +313,9 @@ def __init__(self, shape, dtype=None, **kwargs): elif inner is not None: self.__weighting = NumpyTensorSpaceCustomInner(inner) else: - # No weighting, i.e., weighting with constant 1.0 - self.__weighting = NumpyTensorSpaceConstWeighting(1.0, exponent) + # No weighting, i.e., weighting with constant 1.0 in each axis + self.__weighting = NumpyTensorSpacePerAxisWeighting( + [1.0] * self.ndim, exponent) # Make sure there are no leftover kwargs if kwargs: @@ -305,10 +338,11 @@ def weighting(self): @property def is_weighted(self): - """Return ``True`` if the space is not weighted by constant 1.0.""" + """Return ``True`` if the 
        space is not weighted by 1.0 in each axis."""
        return not (
-            isinstance(self.weighting, NumpyTensorSpaceConstWeighting) and
-            self.weighting.const == 1.0)
+            isinstance(self.weighting, NumpyTensorSpacePerAxisWeighting) and
+            self.weighting.array_axes == () and
+            self.weighting.consts == (1.0,) * self.ndim)

    @property
    def exponent(self):
@@ -439,6 +473,137 @@ def element(self, inp=None, data_ptr=None, order=None):
         else:
             raise TypeError('cannot provide both `inp` and `data_ptr`')

+    def _astype(self, dtype):
+        """Internal helper for `astype`.
+
+        Subclasses with a different constructor signature should override
+        this method.
+        """
+        kwargs = {}
+        dtype = np.dtype(dtype)
+
+        # Use weighting only for floating-point types, otherwise, e.g.,
+        # `space.astype(bool)` would fail
+        if is_floating_dtype(dtype) and dtype.shape == ():
+            # Standard case, basically pass-through
+            weighting = getattr(self, 'weighting', None)
+            if weighting is not None:
+                kwargs['weighting'] = weighting
+
+        elif is_floating_dtype(dtype) and dtype.shape != ():
+            # Got nontrivial `dtype.shape`, make new axes accordingly
+            weighting_slc = (
+                (None,) * len(dtype.shape) + (slice(None),) * self.ndim
+            )
+            weighting = slice_weighting(
+                self.weighting, self.shape, weighting_slc)
+            kwargs['weighting'] = weighting
+
+        return type(self)(dtype.shape + self.shape, dtype=dtype.base, **kwargs)
+
+    def newaxis(self, index, shape, weighting=None):
+        """Return a copy of this space with extra axes inserted.
+
+        .. note::
+            This function is most useful in the default case of per-axis
+            weighting factors. For weighting with a single constant, with
+            an array of the same shape as this space, or for custom
+            weighting classes, "inserting" new factors does not really make
+            sense. This function will therefore use the old constant in the
+            first case and fall back to no weighting in all other
+            non-standard cases. If that is not the desired outcome, a new
+            space should be constructed altogether.
+
+        Parameters
+        ----------
+        index : int
+            Position at which to insert new axes. Negative indices are
+            counted backwards from the end.
+        shape : nonnegative int or sequence of nonnegative ints
+            Sizes of the axes that are to be inserted.
+        weighting : `array-like` or sequence of `array-like`, optional
+            Weighting factors that should be used in the new axes.
+            The number of factors must correspond to the number of entries
+            in ``shape`` (1 if ``shape`` is an int).
+
+        Examples
+        --------
+        In the default unweighted case, the extended space is unweighted
+        as well, unless specified otherwise:
+
+        >>> space = odl.rn(3)
+        >>> space.newaxis(0, 2)
+        rn((2, 3))
+        >>> space.newaxis(1, 2)
+        rn((3, 2))
+        >>> space = odl.rn((2, 5))
+        >>> space.newaxis(1, (3, 4))
+        rn((2, 3, 4, 5))
+        >>> space.newaxis(1, (3, 4), weighting=[0.5, 2.0])
+        rn((2, 3, 4, 5), weighting=(1.0, 0.5, 2.0, 1.0))
+
+        The (default) per-axis weighting is extended with factors 1.0 by
+        default, or other factors as given:
+
+        >>> space = odl.rn((2, 4))
+        >>> space.newaxis(1, 3) == space.newaxis(1, 3, weighting=[1.0])
+        True
+        >>> space.newaxis(1, 3, weighting=[2, 1, 0.5])
+        rn((2, 3, 4), weighting=(1.0, [ 2. ,  1. ,  0.5], 1.0))
+        """
+        # Check inputs
+        index, index_in = int(index), index
+        if index != index_in:
+            raise TypeError('`index` must be an int, got {!r}'
+                            ''.format(index_in))
+        if index < 0:
+            index += self.ndim
+        if not 0 <= index <= self.ndim:
+            raise IndexError(
+                '`index` must satisfy `-{ndim} <= index <= {ndim}`, got {0}'
+                ''.format(index_in, ndim=self.ndim))

+        try:
+            shape = tuple(shape)
+        except TypeError:
+            shape = (shape,)
+
+        num_axes = len(shape)
+
+        # Determine new weighting
+        if isinstance(self.weighting, PerAxisWeighting):
+            if weighting is None:
+                weighting = [1.0] * num_axes
+            else:
+                if num_axes == 1:
+                    weighting = [
+                        np.asarray(weighting, dtype=self.dtype).squeeze()
+                    ]
+                else:
+                    weighting = [np.asarray(w, dtype=self.dtype)
+                                 for w in weighting]
+
+                if len(weighting) != num_axes:
+                    raise ValueError(
+                        '`shape` has length {}, but got {} weighting factors'
+                        ''.format(num_axes, len(weighting)))
+
+            old_factors = list(self.weighting.factors)
+            new_factors = old_factors[:index] + weighting + old_factors[index:]
+            new_weighting = NumpyTensorSpacePerAxisWeighting(
+                new_factors, self.exponent)
+
+        elif isinstance(self.weighting, ConstWeighting):
+            new_weighting = self.weighting
+        else:
+            new_weighting = None
+
+        # Construct new space
+        new_shape = self.shape[:index] + shape + self.shape[index:]
+        return type(self)(new_shape, self.dtype, weighting=new_weighting)
+
     def zero(self):
         """Return a tensor of all zeros.

@@ -469,12 +634,17 @@ def one(self):
     def available_dtypes():
         """Return the set of data types available in this implementation.

+        Returns
+        -------
+        available_dtypes : set
+
         Notes
         -----
-        This is all dtypes available in Numpy. See ``numpy.sctypes``
-        for more information.
+        This set includes all Numpy dtypes, except for ``object`` and
+        ``void``. See ``numpy.sctypes`` for more information.

-        The available dtypes may depend on the specific system used.
+        The available dtypes can depend on the operating system and the
+        Numpy version.
         """
         all_dtypes = []
         for lst in np.sctypes.values():
@@ -544,7 +714,7 @@ def _lincomb(self, a, x1, b, x2, out):
         >>> result is out
         True
         """
-        _lincomb_impl(a, x1, b, x2, out)
+        _lincomb_impl(a, x1.data, b, x2.data, out.data)

     def _dist(self, x1, x2):
         """Return the distance between ``x1`` and ``x2``.
@@ -761,6 +931,77 @@ def __hash__(self):
         return hash((super(NumpyTensorSpace, self).__hash__(),
                      self.weighting))

+    def __getitem__(self, indices):
+        """Return ``self[indices]``.
+
+        For all supported cases, indexing is implemented such that for an
+        element ``x in space``, ::
+
+            x[indices] in space[indices]
+
+        Space indexing works with
+
+        - integers,
+        - `slice` objects,
+        - index arrays, i.e., 1D array-like objects containing integers,
+
+        and combinations of the above. It does not work with boolean arrays
+        or more advanced "fancy indexing".
+
+        .. note::
+            This implementation propagates ``shape``, ``dtype``,
+            ``weighting`` and ``exponent`` to the sliced space. Subclasses
+            with additional properties need to override the method.
+
+        Examples
+        --------
+        A single integer slices along the first axis (index does not
+        matter as long as it lies within the bounds):
+
+        >>> rn = odl.rn((3, 4, 5, 6))
+        >>> rn[0]
+        rn((4, 5, 6))
+        >>> rn[2]
+        rn((4, 5, 6))
+
+        Multiple indices slice into corresponding axes from the left:
+
+        >>> rn[0, 0]
+        rn((5, 6))
+        >>> rn[0, 1, 1]
+        rn(6)
+
+        Ellipsis (``...``) and ``slice(None)`` (``:``) can be used to keep
+        one or several axes intact:
+
+        >>> rn[0, :, 1, :]
+        rn((4, 6))
+        >>> rn[..., 0, 0]
+        rn((3, 4))
+
+        With slices, parts of axes can be selected:
+
+        >>> rn[0, :3, 1:4, ::2]
+        rn((3, 3, 3))
+
+        Array-like objects (must all have the same 1D shape) of integers are
+        treated as follows: if their common length is ``n``, a new axis
+        of length ``n`` is created at the position of the leftmost index
+        array, and all index array axes are collapsed (Note: this is
+        not so useful for spaces, more so for elements):
+
+        >>> rn[0, 0, [0, 1, 0, 2], :]
+        rn((4, 6))
+        >>> rn[:, [1, 1], [0, 2], :]
+        rn((3, 2, 6))
+        >>> rn[[2, 0], [3, 3], [0, 1], [5, 2]]
+        rn(2)
+        """
+        new_shape, removed_axes, _, _ = simulate_slicing(self.shape, indices)
+        weighting = slice_weighting(self.weighting, self.shape, indices)
+        return type(self)(shape=new_shape, dtype=self.dtype,
+                          weighting=weighting, exponent=self.exponent)
+
     @property
     def byaxis(self):
         """Return the subspace defined along one or several dimensions.
@@ -782,7 +1023,7 @@ def byaxis(self):
         """
         space = self

-        class NpyTensorSpacebyaxis(object):
+        class NpyTensorSpaceByAxis(object):

             """Helper class for indexing by axis."""

@@ -791,24 +1032,25 @@ def __getitem__(self, indices):
                 try:
                     iter(indices)
                 except TypeError:
-                    newshape = space.shape[indices]
+                    # Integer, slice or Ellipsis
+                    indices = list(range(space.ndim))[indices]
+                    if not isinstance(indices, list):
+                        indices = [indices]
                 else:
-                    newshape = tuple(space.shape[i] for i in indices)
+                    indices = [int(i) for i in indices]

-                if isinstance(space.weighting, ArrayWeighting):
-                    new_array = np.asarray(space.weighting.array[indices])
-                    weighting = NumpyTensorSpaceArrayWeighting(
-                        new_array, space.weighting.exponent)
-                else:
-                    weighting = space.weighting
-
-                return type(space)(newshape, space.dtype, weighting=weighting)
+                new_shape = tuple(space.shape[i] for i in indices)
+                new_weighting = slice_weighting_by_axis(space.weighting,
+                                                        space.shape, indices)
+                return type(space)(new_shape, space.dtype,
+                                   weighting=new_weighting,
+                                   exponent=space.exponent)

             def __repr__(self):
                 """Return ``repr(self)``."""
                 return repr(space) + '.byaxis'

-        return NpyTensorSpacebyaxis()
+        return NpyTensorSpaceByAxis()

     def __repr__(self):
         """Return ``repr(self)``."""
@@ -855,7 +1097,7 @@ class NumpyTensor(Tensor):

     def __init__(self, space, data):
         """Initialize a new instance."""
-        Tensor.__init__(self, space)
+        super(NumpyTensor, self).__init__(space)
         self.__data = data

     @property
@@ -923,6 +1165,9 @@ def astype(self, dtype):
         newelem : `NumpyTensor`
             Version of this element with given data type.
         """
+        dtype = np.dtype(dtype)
+        if dtype.shape != ():
+            raise ValueError('`dtype` with shape not allowed')
         return self.space.astype(dtype).element(self.data.astype(dtype))

     @property
@@ -1091,26 +1336,37 @@ def __getitem__(self, indices):
             [[-9.,  2., -9.],
              [-9.,  5., -9.]]
         )
+
+        More advanced indexing:
+
+        >>> x = space.element([[1, 2, 3],
+        ...                    
[4, 5, 6]]) + >>> x[[0, 0, 1], [2, 1, 0]] # entries (0, 2), (0, 1) and (1, 0) + rn(3).element([ 3., 2., 4.]) + >>> bool_elem = x.ufuncs.less(3) + >>> x[bool_elem] + rn(2).element([ 1., 2.]) """ - # Lazy implementation: index the array and deal with it if isinstance(indices, NumpyTensor): indices = indices.data - arr = self.data[indices] + res_arr = self.data[indices] - if np.isscalar(arr): + if np.isscalar(res_arr): if self.space.field is not None: - return self.space.field.element(arr) + return self.space.field.element(res_arr) else: - return arr + return res_arr else: - if is_numeric_dtype(self.dtype): - weighting = self.space.weighting - else: - weighting = None - space = type(self.space)( - arr.shape, dtype=self.dtype, exponent=self.space.exponent, - weighting=weighting) - return space.element(arr) + # If possible, ensure that + # `self.space[indices].shape == self[indices].shape` + try: + res_space = self.space[indices] + except TypeError: + # No weighting + res_space = type(self.space)( + res_arr.shape, dtype=self.dtype, + exponent=self.space.exponent) + return res_space.element(res_arr) def __setitem__(self, indices, values): """Implement ``self[indices] = values``. @@ -1632,25 +1888,29 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): out1 = out_tuple[0] out2 = out_tuple[1] - # --- Process `inputs` --- # - - # Convert inputs that are ODL tensors to Numpy arrays so that the - # native Numpy ufunc is called later - inputs = tuple( - inp.asarray() if isinstance(inp, type(self)) else inp - for inp in inputs) - # --- Get some parameters for later --- # # Arguments for `writable_array` and/or space constructors - out_dtype = kwargs.get('dtype', None) - if out_dtype is None: + dtype_out = kwargs.get('dtype', None) + if dtype_out is None: array_kwargs = {} else: - array_kwargs = {'dtype': out_dtype} + array_kwargs = {'dtype': dtype_out} exponent = self.space.exponent weighting = self.space.weighting + try: + weighting2 = inputs[1].space.weighting + except (IndexError, AttributeError): + weighting2 = None + + # --- Process `inputs` --- # + + # Convert inputs that are ODL tensors to Numpy arrays so that the + # native Numpy ufunc is called later + inputs = tuple( + inp.asarray() if isinstance(inp, type(self)) else inp + for inp in inputs) # --- Evaluate ufunc --- # @@ -1751,12 +2011,76 @@ class CtxNone(object): # Wrap result if necessary (lazily) if out is None: if is_floating_dtype(res.dtype): - if res.shape != self.shape: - # Don't propagate weighting if shape changes - weighting = NumpyTensorSpaceConstWeighting(1.0, - exponent) + + # For weighting, we need to check which axes remain in + # the result and slice the weighting accordingly + if method == 'reduce': + axis = kwargs.get('axis', 0) + try: + iter(axis) + except TypeError: + axis = [axis] + + if kwargs.get('keepdims', False): + reduced_axes = [] + else: + reduced_axes = [i for i in range(self.ndim) + if i not in axis] + weighting = slice_weighting_by_axis( + weighting, self.shape, reduced_axes) + + elif method == 'outer': + # Stack the weightings as well if possible. Stacking + # makes sense for per-axis weighting and constant + # weighting for 1D inputs. An input that has no + # weighting results in per-axis weighting with + # constants 1. 
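+                    # Illustration (hypothetical spaces): np.multiply.outer
+                    # on elements of rn(2, weighting=2.0) and rn(3) yields
+                    # an element of rn((2, 3), weighting=(2.0, 1.0)).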
+                    can_stack = True
+                    if isinstance(weighting,
+                                  NumpyTensorSpacePerAxisWeighting):
+                        factors = weighting.factors
+                    elif (isinstance(weighting,
+                                     NumpyTensorSpaceConstWeighting) and
+                          inputs[0].ndim == 1):
+                        factors = (weighting.const,)
+                    else:
+                        can_stack = False
+
+                    if weighting2 is None:
+                        factors2 = (1.0,) * (res.ndim - inputs[0].ndim)
+                    elif isinstance(weighting2,
+                                    NumpyTensorSpacePerAxisWeighting):
+                        factors2 = weighting2.factors
+                    elif (isinstance(weighting2,
+                                     NumpyTensorSpaceConstWeighting) and
+                          inputs[1].ndim == 1):
+                        factors2 = (weighting2.const,)
+                    elif (isinstance(weighting2,
+                                     NumpyTensorSpaceConstWeighting) and
+                          weighting2.const == 1):
+                        factors2 = (1.0,) * inputs[1].ndim
+                    else:
+                        can_stack = False
+
+                    if can_stack:
+                        weighting = NumpyTensorSpacePerAxisWeighting(
+                            factors + factors2,
+                            exponent=weighting.exponent)
+                    else:
+                        weighting = NumpyTensorSpaceConstWeighting(
+                            1.0, exponent)
+
+                elif (res.shape != self.shape and
+                      not isinstance(weighting,
+                                     NumpyTensorSpaceConstWeighting)):
+                    # For other cases, preserve constant weighting, and
+                    # preserve other weightings if the shape is unchanged
+                    weighting = NumpyTensorSpaceConstWeighting(
+                        1.0, exponent)
+
                 spc_kwargs = {'weighting': weighting}
-            else:
+
+            else:  # not is_floating_dtype(res.dtype)
                 spc_kwargs = {}

             out_space = type(self.space)(res.shape, res.dtype,
@@ -1766,6 +2090,9 @@ class CtxNone(object):
     return out


+# --- Implementations of low-level functions --- #
+
+
 def _blas_is_applicable(*args):
     """Whether BLAS routines can be applied or not.

@@ -1776,8 +2103,8 @@ def _blas_is_applicable(*args):

     Parameters
     ----------
-    x1,...,xN : `NumpyTensor`
-        The tensors to be tested for BLAS conformity.
+    x1,...,xN : `numpy.ndarray`
+        The arrays to be tested for BLAS conformity.

     Returns
     -------
@@ -1805,14 +2132,13 @@ def _lincomb_impl(a, x1, b, x2, out):
     import scipy.linalg

     size = native(x1.size)
-
     if size < THRESHOLD_SMALL:
         # Faster for small arrays
-        out.data[:] = a * x1.data + b * x2.data
+        out[:] = a * x1 + b * x2
         return
-
     elif (size < THRESHOLD_MEDIUM or
-          not _blas_is_applicable(x1.data, x2.data, out.data)):
+          not _blas_is_applicable(x1, x2, out)):

         def fallback_axpy(x1, x2, n, a):
             """Fallback axpy implementation avoiding copy."""
@@ -1833,25 +2159,22 @@ def fallback_copy(x1, x2, n):
             return x2

         axpy, scal, copy = (fallback_axpy, fallback_scal, fallback_copy)
-        x1_arr = x1.data
-        x2_arr = x2.data
-        out_arr = out.data

     else:
         # Need flat data for BLAS, otherwise in-place does not work.
         # Raveling must happen in fixed order for non-contiguous out,
         # otherwise 'A' is applied to arrays, which makes the outcome
         # dependent on their respective contiguousness.
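+        # (Dispatch summary: sizes below THRESHOLD_SMALL use plain NumPy
+        # arithmetic, medium sizes and non-BLAS-compatible data use the
+        # Python fallbacks above, and only this branch calls into BLAS.)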
- if out.data.flags.f_contiguous: + if out.flags.f_contiguous: ravel_order = 'F' else: ravel_order = 'C' - x1_arr = x1.data.ravel(order=ravel_order) - x2_arr = x2.data.ravel(order=ravel_order) - out_arr = out.data.ravel(order=ravel_order) + x1 = x1.ravel(order=ravel_order) + x2 = x2.ravel(order=ravel_order) + out = out.ravel(order=ravel_order) axpy, scal, copy = scipy.linalg.blas.get_blas_funcs( - ['axpy', 'scal', 'copy'], arrays=(x1_arr, x2_arr, out_arr)) + ['axpy', 'scal', 'copy'], arrays=(x1, x2, out)) if x1 is x2 and b != 0: # x1 is aligned with x2 -> out = (a+b)*x1 @@ -1859,175 +2182,88 @@ def fallback_copy(x1, x2, n): elif out is x1 and out is x2: # All the vectors are aligned -> out = (a+b)*out if (a + b) != 0: - scal(a + b, out_arr, size) + scal(a + b, out, size) else: - out_arr[:] = 0 + out[:] = 0 elif out is x1: # out is aligned with x1 -> out = a*out + b*x2 if a != 1: - scal(a, out_arr, size) + scal(a, out, size) if b != 0: - axpy(x2_arr, out_arr, size, b) + axpy(x2, out, size, b) elif out is x2: # out is aligned with x2 -> out = a*x1 + b*out if b != 1: - scal(b, out_arr, size) + scal(b, out, size) if a != 0: - axpy(x1_arr, out_arr, size, a) + axpy(x1, out, size, a) else: # We have exhausted all alignment options, so x1 is not x2 is not out # We now optimize for various values of a and b if b == 0: if a == 0: # Zero assignment -> out = 0 - out_arr[:] = 0 + out[:] = 0 else: # Scaled copy -> out = a*x1 - copy(x1_arr, out_arr, size) + copy(x1, out, size) if a != 1: - scal(a, out_arr, size) + scal(a, out, size) else: # b != 0 if a == 0: # Scaled copy -> out = b*x2 - copy(x2_arr, out_arr, size) + copy(x2, out, size) if b != 1: - scal(b, out_arr, size) + scal(b, out, size) elif a == 1: # No scaling in x1 -> out = x1 + b*x2 - copy(x1_arr, out_arr, size) - axpy(x2_arr, out_arr, size, b) + copy(x1, out, size) + axpy(x2, out, size, b) else: # Generic case -> out = a*x1 + b*x2 - copy(x2_arr, out_arr, size) + copy(x2, out, size) if b != 1: - scal(b, out_arr, size) - axpy(x1_arr, out_arr, size, a) - - -def _weighting(weights, exponent): - """Return a weighting whose type is inferred from the arguments.""" - if np.isscalar(weights): - weighting = NumpyTensorSpaceConstWeighting(weights, exponent) - elif weights is None: - weighting = NumpyTensorSpaceConstWeighting(1.0, exponent) - else: # last possibility: make an array - arr = np.asarray(weights) - weighting = NumpyTensorSpaceArrayWeighting(arr, exponent) - return weighting - - -def npy_weighted_inner(weights): - """Weighted inner product on `TensorSpace`'s as free function. - - Parameters - ---------- - weights : scalar or `array-like` - Weights of the inner product. A scalar is interpreted as a - constant weight, a 1-dim. array as a weighting vector. - - Returns - ------- - inner : `callable` - Inner product function with given weight. Constant weightings - are applicable to spaces of any size, for arrays the sizes - of the weighting and the space must match. - - See Also - -------- - NumpyTensorSpaceConstWeighting - NumpyTensorSpaceArrayWeighting - """ - return _weighting(weights, exponent=2.0).inner + scal(b, out, size) + axpy(x1, out, size, a) -def npy_weighted_norm(weights, exponent=2.0): - """Weighted norm on `TensorSpace`'s as free function. - - Parameters - ---------- - weights : scalar or `array-like` - Weights of the norm. A scalar is interpreted as a - constant weight, a 1-dim. array as a weighting vector. - exponent : positive `float` - Exponent of the norm. 
- - Returns - ------- - norm : `callable` - Norm function with given weight. Constant weightings - are applicable to spaces of any size, for arrays the sizes - of the weighting and the space must match. - - See Also - -------- - NumpyTensorSpaceConstWeighting - NumpyTensorSpaceArrayWeighting - """ - return _weighting(weights, exponent=exponent).norm - - -def npy_weighted_dist(weights, exponent=2.0): - """Weighted distance on `TensorSpace`'s as free function. - - Parameters - ---------- - weights : scalar or `array-like` - Weights of the distance. A scalar is interpreted as a - constant weight, a 1-dim. array as a weighting vector. - exponent : positive `float` - Exponent of the norm. - - Returns - ------- - dist : `callable` - Distance function with given weight. Constant weightings - are applicable to spaces of any size, for arrays the sizes - of the weighting and the space must match. - - See Also - -------- - NumpyTensorSpaceConstWeighting - NumpyTensorSpaceArrayWeighting - """ - return _weighting(weights, exponent=exponent).dist - - -def _norm_default(x): +def _norm_impl(x): """Default Euclidean norm implementation.""" # Lazy import to improve `import odl` time import scipy.linalg - if _blas_is_applicable(x.data): + if _blas_is_applicable(x): nrm2 = scipy.linalg.blas.get_blas_funcs('nrm2', dtype=x.dtype) norm = partial(nrm2, n=native(x.size)) else: norm = np.linalg.norm - return norm(x.data.ravel()) + return norm(x.ravel()) -def _pnorm_default(x, p): +def _pnorm_impl(x, p): """Default p-norm implementation.""" - return np.linalg.norm(x.data.ravel(), ord=p) + return np.linalg.norm(x.ravel(), ord=p) -def _pnorm_diagweight(x, p, w): +def _pnorm_diagweight_impl(x, p, w): """Diagonally weighted p-norm implementation.""" # Ravel both in the same order (w is a numpy array) - order = 'F' if all(a.flags.f_contiguous for a in (x.data, w)) else 'C' + order = 'F' if all(a.flags.f_contiguous for a in (x, w)) else 'C' # This is faster than first applying the weights and then summing with # BLAS dot or nrm2 - xp = np.abs(x.data.ravel(order)) + xp = np.abs(x.ravel(order)) if p == float('inf'): - xp *= w.ravel(order) return np.max(xp) + elif p == -float('inf'): + return np.min(xp) else: xp = np.power(xp, p, out=xp) xp *= w.ravel(order) return np.sum(xp) ** (1 / p) -def _inner_default(x1, x2): +def _inner_impl(x1, x2): """Default Euclidean inner product implementation.""" # Ravel both in the same order - order = 'F' if all(a.data.flags.f_contiguous for a in (x1, x2)) else 'C' + order = 'F' if all(a.flags.f_contiguous for a in (x1, x2)) else 'C' if is_real_dtype(x1.dtype): if x1.size > THRESHOLD_MEDIUM: @@ -2035,16 +2271,100 @@ def _inner_default(x1, x2): return np.tensordot(x1, x2, [range(x1.ndim)] * 2) else: # Several times faster for small arrays - return np.dot(x1.data.ravel(order), - x2.data.ravel(order)) + return np.dot(x1.ravel(order), x2.ravel(order)) else: # x2 as first argument because we want linearity in x1 - return np.vdot(x2.data.ravel(order), - x1.data.ravel(order)) + return np.vdot(x2.ravel(order), x1.ravel(order)) + + +# --- Weightings --- # + + +def slice_weighting(weighting, space_shape, indices): + """Return a weighting for a space after indexing. 
+ + The different types of weightings behave as follows: + + - ``ConstWeighting``: preserved + - ``PerAxisWeighting``: preserved, where factors are discarded for + removed axes and sliced for other axes in which an array is used + - ``ArrayWeighting``: preserved, using the sliced array for the + new weighting + - Other: not preserved, mapped to ``None``. + """ + indices = normalized_index_expression(indices, space_shape) + new_shape, removed_axes, new_axes, _ = simulate_slicing( + space_shape, indices) + + if isinstance(weighting, NumpyTensorSpaceConstWeighting): + new_weighting = weighting + + elif isinstance(weighting, NumpyTensorSpacePerAxisWeighting): + # Determine factors without `None` components + factors = [] + indices_no_none = [i for i in indices if i is not None] + for i, (fac, idx) in enumerate(zip_longest(weighting.factors, + indices_no_none, + fillvalue=slice(None))): + if i in removed_axes: + continue + + if fac.ndim == 0: + factors.append(fac) + else: + factors.append(fac[idx]) + + # Add 1.0 for new axes + for newax in new_axes: + factors.insert(newax, 1.0) + + new_weighting = NumpyTensorSpacePerAxisWeighting( + factors, weighting.exponent) + + elif isinstance(weighting, NumpyTensorSpaceArrayWeighting): + array = weighting.array[indices] + new_weighting = NumpyTensorSpaceArrayWeighting( + array, weighting.exponent) + else: + new_weighting = None + + return new_weighting + + +def slice_weighting_by_axis(weighting, space_shape, indices): + """Return a weighting for a space after indexing by axis. + + The different types of weightings behave as follows: + + - ``ConstWeighting``: preserved + - ``PerAxisWeighting``: preserved, where factors are discarded for + removed axes, repeated for repeated axes, and set to 1 for new + axes + - ``ArrayWeighting``: not preserved, no meaningful way to slice by axis + - Other: not preserved, mapped to ``None``. + """ + try: + iter(indices) + except TypeError: + # Integer, slice or Ellipsis + indices = list(range(len(space_shape)))[indices] + if not isinstance(indices, list): + indices = [indices] + else: + indices = [int(i) for i in indices] + + if isinstance(weighting, NumpyTensorSpaceConstWeighting): + new_weighting = weighting + elif isinstance(weighting, NumpyTensorSpacePerAxisWeighting): + factors = [weighting.factors[i] for i in indices] + new_weighting = NumpyTensorSpacePerAxisWeighting( + factors, weighting.exponent) + + else: + new_weighting = None -# TODO: implement intermediate weighting schemes with arrays that are -# broadcast, i.e. between scalar and full-blown in dimensionality? + return new_weighting class NumpyTensorSpaceArrayWeighting(ArrayWeighting): @@ -2058,7 +2378,7 @@ class NumpyTensorSpaceArrayWeighting(ArrayWeighting): """ def __init__(self, array, exponent=2.0): - """Initialize a new instance. + r"""Initialize a new instance. Parameters ---------- @@ -2076,9 +2396,9 @@ def __init__(self, array, exponent=2.0): :math:`W` is defined as .. math:: - \\langle A, B\\rangle_W := - \\langle W \odot A, B\\rangle = - \\langle w \odot a, b\\rangle = + \langle A, B\rangle_W := + \langle W \odot A, B\rangle = + \langle w \odot a, b\rangle = b^{\mathrm{H}} (w \odot a), where :math:`a, b, w` are the "flattened" counterparts of @@ -2086,44 +2406,47 @@ def __init__(self, array, exponent=2.0): stands for transposed complex conjugate and :math:`w \odot a` for element-wise multiplication. - - For other exponents, only norm and dist are defined. 
In the
-          case of exponent :math:`\\infty`, the weighted norm is
+        - For other exponents, only norm and dist are defined. In the case
+          of finite exponent, it is (using point-wise exponentiation)
 
             .. math::
-                \| A\|_{W, \\infty} :=
-                \| W \odot A\|_{\\infty} =
-                \| w \odot a\|_{\\infty},
+                \| A\|_{W, p} :=
+                \| W^{1/p} \odot A\|_p =
+                \| w^{1/p} \odot a\|_p,
 
-            otherwise it is (using point-wise exponentiation)
+            and for :math:`\pm \infty` we have
 
             .. math::
-                \| A\|_{W, p} :=
-                \| W^{1/p} \odot A\|_{p} =
-                \| w^{1/p} \odot a\|_{\\infty}.
+                \| A\|_{W, \pm \infty} :=
+                \| A\|_{\pm \infty} =
+                \| a\|_{\pm \infty}.
 
-        - Note that this definition does **not** fulfill the limit
-          property in :math:`p`, i.e.
+          Note that this definition is chosen such that the limit
+          property in :math:`p` holds, i.e.
 
             .. math::
-                \| A\|_{W, p} \\not\\to
-                \| A\|_{W, \\infty} \quad (p \\to \\infty)
-
-            unless all weights are equal to 1.
+                \| A\|_{W, p} \to
+                \| A\|_{W, \infty} \quad (p \to \infty).
 
         - The array :math:`W` may only have positive entries, otherwise
           it does not define an inner product or norm, respectively. This
           is not checked during initialization.
         """
         if isinstance(array, NumpyTensor):
-            array = array.data
-        elif not isinstance(array, np.ndarray):
-            array = np.asarray(array)
+            array = array.data
+
+        array = np.asarray(array)
+        if array.dtype == object:
+            raise ValueError('`array` has invalid dtype object')
+
         super(NumpyTensorSpaceArrayWeighting, self).__init__(
             array, impl='numpy', exponent=exponent)
 
     def __hash__(self):
         """Return ``hash(self)``."""
-        return hash((type(self), self.array.tobytes(), self.exponent))
+        return hash(
+            (type(self), array_hash(self.array), self.exponent)
+        )
 
     def inner(self, x1, x2):
         """Return the weighted inner product of ``x1`` and ``x2``.
 
@@ -2143,7 +2466,7 @@ def inner(self, x1, x2):
                                       'exponent != 2 (got {})'
                                       ''.format(self.exponent))
         else:
-            inner = _inner_default(x1 * self.array, x2)
+            inner = _inner_impl(x1.data * self.array, x2.data)
             if is_real_dtype(x1.dtype):
                 return float(inner)
             else:
@@ -2163,12 +2486,284 @@ def norm(self, x):
             The norm of the provided tensor.
         """
         if self.exponent == 2.0:
-            norm_squared = self.inner(x, x).real  # TODO: optimize?!
+            norm_squared = self.inner(x, x).real  # TODO: optimize?
             if norm_squared < 0:
                 norm_squared = 0.0  # Compensate for numerical error
             return float(np.sqrt(norm_squared))
         else:
-            return float(_pnorm_diagweight(x, self.exponent, self.array))
+            return float(_pnorm_diagweight_impl(x.data, self.exponent,
+                                                self.array))
+
+
+class NumpyTensorSpacePerAxisWeighting(PerAxisWeighting):
+
+    """Weighting of a space with one weight per axis.
+
+    See Notes for mathematical details.
+    """
+
+    def __init__(self, factors, exponent=2.0):
+        r"""Initialize a new instance.
+
+        Parameters
+        ----------
+        factors : sequence of `array-like`
+            Weighting factors, one per axis. The factors can be constants
+            or one-dimensional array-like objects.
+        exponent : positive float, optional
+            Exponent of the norm. For values other than 2.0, the inner
+            product is not defined.
+
+        Notes
+        -----
+        We consider a tensor space :math:`\mathbb{F}^n` of shape
+        :math:`n \in \mathbb{N}^d` over a field :math:`\mathbb{F}`.
+        If :math:`0 < v^{(i)} \in \mathbb{R}^{n_i}` are vectors whose
+        lengths match the corresponding axes, their outer product
+
+        .. math::
+            v_{k} = \prod_{i=1}^{d} v^{(i)}_{k_i}
+
+        defines a tensor of shape :math:`n`. With this tensor
+        we can define a weighted space as follows.
+
+        - For exponent 2.0, a new weighted inner product is given as
+
+          .. math::
+              \langle a, b\rangle_v :=
+              \langle v \odot a, b\rangle =
+              b^{\mathrm{H}} (v \odot a),
+
+          where :math:`b^{\mathrm{H}}` stands for transposed complex
+          conjugate and ":math:`\odot`" for pointwise product.
+
+        - For other exponents, only norm and dist are defined. In the case
+          of finite exponent, it is (using point-wise exponentiation)
+
+          .. math::
+              \| a \|_{v, p} := \| v^{1/p} \odot a \|_{p},
+
+          and for :math:`\pm \infty` we have
+
+          .. math::
+              \| a \|_{v, \pm \infty} := \| a \|_{\pm \infty}.
+
+          Note that this definition is chosen such that the limit
+          property in :math:`p` holds, i.e.
+
+          .. math::
+              \| a\|_{v, p} \to
+              \| a \|_{v, \infty} \quad (p \to \infty).
+        """
+        # TODO: allow 3-tuples for `bdry, inner, bdry` type factors
+        conv_factors = []
+        for factor in factors:
+            factor = np.asarray(factor)
+            if factor.ndim not in (0, 1):
+                raise ValueError(
+                    '`factors` must all be scalars or 1-dim. vectors, got '
+                    '{}-dim. entries'.format(factor.ndim))
+
+            conv_factors.append(factor)
+
+        super(NumpyTensorSpacePerAxisWeighting, self).__init__(
+            conv_factors, impl='numpy', exponent=exponent)
+
+    @property
+    def const_axes(self):
+        """Tuple of indices in which the factors are constants."""
+        return tuple(i for i, fac in enumerate(self.factors) if fac.ndim == 0)
+
+    @property
+    def consts(self):
+        """Tuple containing those factors that are constants."""
+        return tuple(fac for fac in self.factors if fac.ndim == 0)
+
+    @property
+    def array_axes(self):
+        """Tuple of indices in which the factors are arrays."""
+        return tuple(i for i, fac in enumerate(self.factors) if fac.ndim == 1)
+
+    @property
+    def arrays(self):
+        """Tuple containing those factors that are arrays."""
+        return tuple(fac for fac in self.factors if fac.ndim == 1)
+
+    def inner(self, x1, x2):
+        """Return the weighted inner product of ``x1`` and ``x2``.
+
+        Parameters
+        ----------
+        x1, x2 : `NumpyTensor`
+            Tensors whose inner product is calculated.
+
+        Returns
+        -------
+        inner : float or complex
+            The inner product of the two provided tensors.
+        """
+        if self.exponent != 2.0:
+            raise NotImplementedError('no inner product defined for '
+                                      'exponent != 2 (got {})'
+                                      ''.format(self.exponent))
+
+        const = np.prod(self.consts)
+        x1_w = fast_1d_tensor_mult(x1.data, self.arrays, axes=self.array_axes)
+        inner = const * _inner_impl(x1_w, x2.data)
+        if x1.space.field is None:
+            return inner
+        else:
+            return x1.space.field.element(inner)
+
+    def norm(self, x):
+        """Return the weighted norm of ``x``.
+
+        Parameters
+        ----------
+        x : `NumpyTensor`
+            Tensor whose norm is calculated.
+
+        Returns
+        -------
+        norm : float
+            The norm of the tensor.
+        """
+        const = np.prod(self.consts)
+
+        if self.exponent == 2.0:
+            arrays = [np.sqrt(arr) for arr in self.arrays]
+            x_w = fast_1d_tensor_mult(x.data, arrays, axes=self.array_axes)
+            return float(np.sqrt(const) * _norm_impl(x_w))
+        elif self.exponent == float('inf'):
+            return float(_pnorm_impl(x.data, float('inf')))
+        else:
+            arrays = [np.power(arr, 1 / self.exponent) for arr in self.arrays]
+            x_w = fast_1d_tensor_mult(x.data, arrays, axes=self.array_axes)
+            return float(const ** (1 / self.exponent) *
+                         _pnorm_impl(x_w, self.exponent))
+
+    def dist(self, x1, x2):
+        """Return the weighted distance between ``x1`` and ``x2``.
+
+        Parameters
+        ----------
+        x1, x2 : `NumpyTensor`
+            Tensors whose mutual distance is calculated.
+
+        Returns
+        -------
+        dist : float
+            The distance between the tensors.
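+
+        Examples
+        --------
+        A small sketch (weighting factors chosen arbitrarily); the
+        distance is the weighted norm of the difference:
+
+        >>> space = odl.rn((2, 3))
+        >>> w = NumpyTensorSpacePerAxisWeighting([0.5, [1.0, 2.0, 1.0]])
+        >>> x, y = space.one(), space.zero()
+        >>> w.dist(x, y) == w.norm(x - y)
+        True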
+        """
+        const = np.prod(self.consts)
+        diff = (x1 - x2).data
+
+        if self.exponent == 2.0:
+            arrays = [np.sqrt(arr) for arr in self.arrays]
+            fast_1d_tensor_mult(diff, arrays, axes=self.array_axes, out=diff)
+            return float(np.sqrt(const) * _norm_impl(diff))
+        elif self.exponent == float('inf'):
+            return float(_pnorm_impl(diff, float('inf')))
+        else:
+            arrays = [np.power(arr, 1 / self.exponent) for arr in self.arrays]
+            fast_1d_tensor_mult(diff, arrays, axes=self.array_axes, out=diff)
+            return float(const ** (1 / self.exponent) *
+                         _pnorm_impl(diff, self.exponent))
+
+    def __eq__(self, other):
+        """Return ``self == other``.
+
+        Returns
+        -------
+        equal : bool
+            ``True`` if ``other`` is a `NumpyTensorSpacePerAxisWeighting`
+            instance with the same exponent and the same factors (equal
+            for constants, *identical* for arrays), ``False`` otherwise.
+        """
+        if other is self:
+            return True
+
+        if isinstance(other, ConstWeighting):
+            # Consider per-axis weighting in 1 axis with a constant to
+            # be equal to constant weighting
+            return (self.exponent == other.exponent and
+                    len(self.factors) == 1 and
+                    self.factors[0].ndim == 0 and
+                    self.factors[0] == other.const)
+        elif not isinstance(other, NumpyTensorSpacePerAxisWeighting):
+            return False
+
+        same_const_idcs = (other.const_axes == self.const_axes)
+        consts_equal = (self.consts == other.consts)
+        arrs_ident = all(a is b for a, b in zip(self.arrays, other.arrays))
+
+        return (same_const_idcs and
+                consts_equal and
+                arrs_ident and
+                super(NumpyTensorSpacePerAxisWeighting, self).__eq__(other))
+
+    def __hash__(self):
+        """Return ``hash(self)``."""
+        return hash(
+            (super(NumpyTensorSpacePerAxisWeighting, self).__hash__(),) +
+            tuple(array_hash(fac) for fac in self.factors)
+        )
+
+    @property
+    def repr_part(self):
+        """String usable in a space's ``__repr__`` method."""
+        max_elems = 2 * np.get_printoptions()['edgeitems']
+        precision = np.get_printoptions()['precision']
+
+        def factors_repr(factors):
+            """Return repr string for the weighting factors part."""
+            factor_strs = []
+            for fac in factors:
+                if fac.ndim == 0:
+                    fmt = '{{:.{}}}'.format(precision)
+                    factor_strs.append(fmt.format(float(fac)))
+                else:
+                    factor_strs.append(array_str(fac, nprint=max_elems))
+            if len(factor_strs) == 1:
+                return factor_strs[0]
+            else:
+                return '({})'.format(', '.join(factor_strs))
+
+        optargs = []
+        optmod = []
+        if not all(fac.ndim == 0 and fac == 1.0 for fac in self.factors):
+            optargs.append(('weighting', factors_repr(self.factors), ''))
+            optmod.append('!s')
+
+        optargs.append(('exponent', self.exponent, 2.0))
+        optmod.append('')
+
+        return signature_string([], optargs, mod=[[], optmod])
+
+    def __repr__(self):
+        """Return ``repr(self)``."""
+        max_elems = 2 * np.get_printoptions()['edgeitems']
+        precision = np.get_printoptions()['precision']
+
+        def factors_repr(factors):
+            """Return repr string for the weighting factors part."""
+            factor_strs = []
+            for fac in factors:
+                if fac.ndim == 0:
+                    fmt = '{{:.{}}}'.format(precision)
+                    factor_strs.append(fmt.format(float(fac)))
+                else:
+                    factor_strs.append(array_str(fac, nprint=max_elems))
+            return '({})'.format(', '.join(factor_strs))
+
+        posargs = [factors_repr(self.factors)]
+        optargs = [('exponent', self.exponent, 2.0)]
+
+        inner_str = signature_string(posargs, optargs, mod=['!s', ''])
+        return '{}(\n{}\n)'.format(self.__class__.__name__, indent(inner_str))
+
+    def __str__(self):
+        """Return ``str(self)``."""
+        return repr(self)
 
 
 class NumpyTensorSpaceConstWeighting(ConstWeighting):
@@ -2179,7 +2774,7 @@ class NumpyTensorSpaceConstWeighting(ConstWeighting):
     """
 
     def 
__init__(self, const, exponent=2.0):
-        """Initialize a new instance.
+        r"""Initialize a new instance.
 
         Parameters
         ----------
@@ -2195,35 +2790,32 @@ def __init__(self, const, exponent=2.0):
            :math:`c` is defined as
 
             .. math::
-                \\langle a, b\\rangle_c :=
-                c \, \\langle a, b\\rangle_c =
+                \langle a, b\rangle_c :=
+                c \, \langle a, b\rangle =
                 c \, b^{\mathrm{H}} a,
 
-            where :math:`b^{\mathrm{H}}` standing for transposed complex
-            conjugate.
+            where :math:`b^{\mathrm{H}}` stands for transposed complex
+            conjugate.
 
-        - For other exponents, only norm and dist are defined. In the
-          case of exponent :math:`\\infty`, the weighted norm is defined
-          as
+        - For other exponents, only norm and dist are defined. In the case
+          of finite exponent, it is
 
             .. math::
-                \| a \|_{c, \\infty} :=
-                c\, \| a \|_{\\infty},
+                \| a \|_{c, p} :=
+                c^{1/p}\, \| a \|_{p},
 
-            otherwise it is
+            and for :math:`\pm \infty` we have
 
             .. math::
-                \| a \|_{c, p} :=
-                c^{1/p}\, \| a \|_{p}.
+                \| a \|_{c, \pm \infty} :=
+                \| a \|_{\pm \infty}.
 
-        - Note that this definition does **not** fulfill the limit
-          property in :math:`p`, i.e.
+          Note that this definition is chosen such that the limit
+          property in :math:`p` holds, i.e.
 
             .. math::
-                \| a\|_{c, p} \\not\\to
-                \| a \|_{c, \\infty} \quad (p \\to \\infty)
-
-            unless :math:`c = 1`.
+                \| a\|_{c, p} \to
+                \| a \|_{c, \infty} \quad (p \to \infty).
 
         - The constant must be positive, otherwise it does not define an
           inner product or norm, respectively.
@@ -2248,12 +2840,12 @@ def inner(self, x1, x2):
             raise NotImplementedError('no inner product defined for '
                                       'exponent != 2 (got {})'
                                       ''.format(self.exponent))
+
+        inner = self.const * _inner_impl(x1.data, x2.data)
+        if x1.space.field is None:
+            return inner
         else:
-            inner = self.const * _inner_default(x1, x2)
-            if x1.space.field is None:
-                return inner
-            else:
-                return x1.space.field.element(inner)
+            return x1.space.field.element(inner)
 
     def norm(self, x):
         """Return the weighted norm of ``x``.
@@ -2269,12 +2861,12 @@ def norm(self, x):
             The norm of the tensor.
         """
         if self.exponent == 2.0:
-            return float(np.sqrt(self.const) * _norm_default(x))
+            return float(np.sqrt(self.const) * _norm_impl(x.data))
         elif self.exponent == float('inf'):
-            return float(self.const * _pnorm_default(x, self.exponent))
+            return float(_pnorm_impl(x.data, self.exponent))
         else:
-            return float((self.const ** (1 / self.exponent) *
-                          _pnorm_default(x, self.exponent)))
+            return float(self.const ** (1 / self.exponent) *
+                         _pnorm_impl(x.data, self.exponent))
 
     def dist(self, x1, x2):
         """Return the weighted distance between ``x1`` and ``x2``.
@@ -2290,12 +2882,12 @@ def dist(self, x1, x2):
             The distance between the tensors.
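+
+        Examples
+        --------
+        A small sketch (constant chosen arbitrarily); the distance is
+        the weighted norm of the difference:
+
+        >>> space = odl.rn(3)
+        >>> w = NumpyTensorSpaceConstWeighting(2.0)
+        >>> x, y = space.one(), space.zero()
+        >>> w.dist(x, y) == w.norm(x - y)
+        True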
""" if self.exponent == 2.0: - return float(np.sqrt(self.const) * _norm_default(x1 - x2)) + return float(np.sqrt(self.const) * _norm_impl((x1 - x2).data)) elif self.exponent == float('inf'): - return float(self.const * _pnorm_default(x1 - x2, self.exponent)) + return float(_pnorm_impl((x1 - x2).data, self.exponent)) else: return float((self.const ** (1 / self.exponent) * - _pnorm_default(x1 - x2, self.exponent))) + _pnorm_impl((x1 - x2).data, self.exponent))) class NumpyTensorSpaceCustomInner(CustomInner): diff --git a/odl/space/pspace.py b/odl/space/pspace.py index 663b41c2aab..7e851023434 100644 --- a/odl/space/pspace.py +++ b/odl/space/pspace.py @@ -10,7 +10,6 @@ from __future__ import print_function, division, absolute_import from itertools import product -from numbers import Integral import numpy as np from odl.set import LinearSpace @@ -18,7 +17,7 @@ from odl.space.weighting import ( Weighting, ArrayWeighting, ConstWeighting, CustomInner, CustomNorm, CustomDist) -from odl.util import is_real_dtype, signature_string, indent +from odl.util import is_real_dtype, signature_string, indent, is_int from odl.util.ufuncs import ProductSpaceUfuncs @@ -213,9 +212,9 @@ class instance can be retrieved from the space by the # Make a power space if the second argument is an integer. # For the case that the integer is 0, we already set the field here. - if len(spaces) == 2 and isinstance(spaces[1], Integral): + if len(spaces) == 2 and is_int(spaces[1]): field = spaces[0].field - spaces = [spaces[0]] * spaces[1] + spaces = [spaces[0]] * int(spaces[1]) # Validate the space arguments if not all(isinstance(spc, LinearSpace) for spc in spaces): @@ -672,8 +671,8 @@ def __getitem__(self, indices): >>> pspace2[:-1, 0] ProductSpace(rn(2), 2) """ - if isinstance(indices, Integral): - return self.spaces[indices] + if is_int(indices): + return self.spaces[int(indices)] elif isinstance(indices, slice): return ProductSpace(*self.spaces[indices], field=self.field) @@ -684,12 +683,12 @@ def __getitem__(self, indices): if not indices: return self idx = indices[0] - if isinstance(idx, Integral): + if is_int(idx): # Single integer in tuple, picking that space and passing # through the rest of the tuple. If the picked space # is not a product space and there are still indices left, # raise an error. - space = self.spaces[idx] + space = self.spaces[int(idx)] rest_indcs = indices[1:] if not rest_indcs: return space @@ -898,8 +897,8 @@ def __eq__(self, other): def __getitem__(self, indices): """Return ``self[indices]``.""" - if isinstance(indices, Integral): - return self.parts[indices] + if is_int(indices): + return self.parts[int(indices)] elif isinstance(indices, slice): return self.space[indices].element(self.parts[indices]) elif isinstance(indices, list): @@ -913,10 +912,10 @@ def __getitem__(self, indices): return self[indices[0]] else: # Tuple with multiple entries - if isinstance(indices[0], Integral): + if is_int(indices[0]): # In case the first entry is an integer, we drop the # axis and return directly from `parts` - return self.parts[indices[0]][indices[1:]] + return self.parts[int(indices[0])][indices[1:]] else: # indices[0] is a slice or list. We first retrieve the # parts indexed in this axis. 
@@ -949,8 +948,8 @@ def __getitem__(self, indices): def __setitem__(self, indices, values): """Implement ``self[indices] = values``.""" # Get the parts to which we assign values - if isinstance(indices, Integral): - indexed_parts = (self.parts[indices],) + if is_int(indices): + indexed_parts = (self.parts[int(indices)],) values = (values,) elif isinstance(indices, slice): indexed_parts = self.parts[indices] @@ -1311,6 +1310,25 @@ def conj(self): complex_conj = [part.conj() for part in self.parts] return self.space.element(complex_conj) + def astype(self, dtype): + """Cast this product space element to a new ``dtype``. + + Parameters + ---------- + dtype : + Scalar data type of the returned space. Can be provided + in any way the `numpy.dtype` constructor understands, e.g. + as built-in type or as a string. Data types with non-trivial + shapes are not allowed. + + Returns + ------- + newelem : `ProductSpaceElement` + Version of this element with given data type. + """ + return self.space.astype(dtype).element( + [p.astype(dtype) for p in self.parts]) + def __str__(self): """Return ``str(self)``.""" return repr(self) @@ -1439,7 +1457,7 @@ def show(self, title=None, indices=None, **kwargs): else: if (isinstance(indices, tuple) or (isinstance(indices, list) and - not all(isinstance(idx, Integral) for idx in indices))): + not all(is_int(idx) for idx in indices))): # Tuples or lists containing non-integers index by axis. # We use the first index for the current pspace and pass # on the rest. @@ -1452,8 +1470,8 @@ def show(self, title=None, indices=None, **kwargs): if isinstance(indices, slice): indices = list(range(*indices.indices(len(self)))) - elif isinstance(indices, Integral): - indices = [indices] + elif is_int(indices): + indices = [int(indices)] else: # Use `indices` as-is pass diff --git a/odl/space/weighting.py b/odl/space/weighting.py index 8fcabbf50d8..4a24a2cb1d6 100644 --- a/odl/space/weighting.py +++ b/odl/space/weighting.py @@ -13,11 +13,13 @@ import numpy as np from odl.space.base_tensors import TensorSpace -from odl.util import array_str, signature_string, indent +from odl.util import ( + array_str, array_hash, signature_string, indent, fast_1d_tensor_mult, + writable_array) __all__ = ('MatrixWeighting', 'ArrayWeighting', 'ConstWeighting', - 'CustomInner', 'CustomNorm', 'CustomDist') + 'CustomInner', 'CustomNorm', 'CustomDist', 'adjoint_weightings') class Weighting(object): @@ -84,20 +86,6 @@ def __hash__(self): """Return ``hash(self)``.""" return hash((type(self), self.impl, self.exponent)) - def equiv(self, other): - """Test if ``other`` is an equivalent weighting. - - Should be overridden, default tests for equality. - - Returns - ------- - equivalent : bool - ``True`` if ``other`` is a `Weighting` instance which - yields the same result as this inner product for any - input, ``False`` otherwise. - """ - return self == other - def inner(self, x1, x2): """Return the inner product of two elements. @@ -355,72 +343,9 @@ def __eq__(self, other): def __hash__(self): """Return ``hash(self)``.""" - # TODO: Better hash for matrix? - return hash((super(MatrixWeighting, self).__hash__(), - self.matrix.tobytes())) - - def equiv(self, other): - """Test if other is an equivalent weighting. - - Returns - ------- - equivalent : bool - ``True`` if ``other`` is a `Weighting` instance with the same - `Weighting.impl`, which yields the same result as this - weighting for any input, ``False`` otherwise. This is checked - by entry-wise comparison of matrices/arrays/constants. 
- """ - # Lazy import to improve `import odl` time - import scipy.sparse - - # Optimization for equality - if self == other: - return True - - elif self.exponent != getattr(other, 'exponent', -1): - return False - - elif isinstance(other, MatrixWeighting): - if self.matrix.shape != other.matrix.shape: - return False - - if scipy.sparse.isspmatrix(self.matrix): - if other.matrix_issparse: - # Optimization for different number of nonzero elements - if self.matrix.nnz != other.matrix.nnz: - return False - else: - # Most efficient out-of-the-box comparison - return (self.matrix != other.matrix).nnz == 0 - else: # Worst case: compare against dense matrix - return np.array_equal(self.matrix.todense(), other.matrix) - - else: # matrix of `self` is dense - if other.matrix_issparse: - return np.array_equal(self.matrix, other.matrix.todense()) - else: - return np.array_equal(self.matrix, other.matrix) - - elif isinstance(other, ArrayWeighting): - if scipy.sparse.isspmatrix(self.matrix): - return (np.array_equiv(self.matrix.diagonal(), - other.array) and - np.array_equal(self.matrix.asformat('dia').offsets, - np.array([0]))) - else: - return np.array_equal( - self.matrix, other.array * np.eye(self.matrix.shape[0])) - - elif isinstance(other, ConstWeighting): - if scipy.sparse.isspmatrix(self.matrix): - return (np.array_equiv(self.matrix.diagonal(), other.const) and - np.array_equal(self.matrix.asformat('dia').offsets, - np.array([0]))) - else: - return np.array_equal( - self.matrix, other.const * np.eye(self.matrix.shape[0])) - else: - return False + return hash( + (super(MatrixWeighting, self).__hash__(), array_hash(self.matrix)) + ) @property def repr_part(self): @@ -524,32 +449,9 @@ def __eq__(self, other): def __hash__(self): """Return ``hash(self)``.""" # TODO: Better hash for array? - return hash((super(ArrayWeighting, self).__hash__(), - self.array.tobytes())) - - def equiv(self, other): - """Return True if other is an equivalent weighting. - - Returns - ------- - equivalent : bool - ``True`` if ``other`` is a `Weighting` instance with the same - `Weighting.impl`, which yields the same result as this - weighting for any input, ``False`` otherwise. This is checked - by entry-wise comparison of arrays/constants. - """ - # Optimization for equality - if self == other: - return True - elif (not isinstance(other, Weighting) or - self.exponent != other.exponent): - return False - elif isinstance(other, MatrixWeighting): - return other.equiv(self) - elif isinstance(other, ConstWeighting): - return np.array_equiv(self.array, other.const) - else: - return np.array_equal(self.array, other.array) + return hash( + (super(ArrayWeighting, self).__hash__(), array_hash(self.array)) + ) @property def repr_part(self): @@ -572,6 +474,40 @@ def __str__(self): return repr(self) +class PerAxisWeighting(Weighting): + + """Weighting of a space with one weight per axis.""" + + def __init__(self, factors, impl, exponent=2.0): + """Initialize a new instance. + + Parameters + ---------- + factors : sequence of `array-like` + Weighting factors, one per axis. The factors can be constants + or one-dimensional array-like objects. + impl : string + Specifier for the implementation backend. + exponent : positive float, optional + Exponent of the norm. For values other than 2.0, the inner + product is not defined. 
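+
+        Examples
+        --------
+        A minimal sketch (factor values chosen arbitrarily; concrete
+        backends such as the Numpy one add the actual inner product,
+        norm and distance implementations):
+
+        >>> weighting = PerAxisWeighting([2.0, [1.0, 0.5]], impl='numpy')
+        >>> len(weighting.factors)
+        2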
+        """
+        super(PerAxisWeighting, self).__init__(impl=impl, exponent=exponent)
+        try:
+            iter(factors)
+        except TypeError:
+            raise TypeError('`factors` {!r} is not a sequence'
+                            ''.format(factors))
+        self.__factors = tuple(factors)
+
+    @property
+    def factors(self):
+        """Tuple of weighting factors for inner product, norm and distance."""
+        return self.__factors
+
+    # No further methods are implemented here, since they would require
+    # knowledge about the concrete array type
+
+
 class ConstWeighting(Weighting):
 
     """Weighting of a space by a constant."""
 
@@ -622,28 +558,10 @@ def __hash__(self):
         """Return ``hash(self)``."""
         return hash((super(ConstWeighting, self).__hash__(), self.const))
 
-    def equiv(self, other):
-        """Test if other is an equivalent weighting.
-
-        Returns
-        -------
-        equivalent : bool
-            ``True`` if other is a `Weighting` instance with the same
-            `Weighting.impl`, which yields the same result as this
-            weighting for any input, ``False`` otherwise. This is checked
-            by entry-wise comparison of matrices/arrays/constants.
-        """
-        if isinstance(other, ConstWeighting):
-            return self == other
-        elif isinstance(other, (ArrayWeighting, MatrixWeighting)):
-            return other.equiv(self)
-        else:
-            return False
-
     @property
     def repr_part(self):
         """String usable in a space's ``__repr__`` method."""
-        optargs = [('weighting', self.const, 1.0),
+        optargs = [('weighting', self.const, None),
                    ('exponent', self.exponent, 2.0)]
         return signature_string([], optargs, mod=':.4')
 
@@ -871,6 +789,207 @@ def __repr__(self):
         return '{}({})'.format(self.__class__.__name__, inner_str)
 
 
+def adjoint_weightings(adj_domain, adj_range, extra_factor=1.0,
+                       merge_1d_arrs=False):
+    """Return functions that together implement correct adjoint weighting.
+
+    This function determines the (presumably) best strategy to merge
+    weights and distribute the constant, depending on the involved
+    weighting types and space sizes.
+
+    Generally, the domain weighting factors enter as multiplicative
+    factors, while the range weighting factors enter as reciprocals.
+
+    The optimization logic is as follows:
+
+    1. For both domain and range, constant factors, 1D arrays and nD arrays
+       are determined.
+    2. The constants are merged into ``const = dom_const / ran_const``.
+    3. If the domain or the range has 1D weighting arrays, the constant
+       is merged into the shortest of those arrays, with domain arrays
+       taking precedence.
+       Otherwise, the constant is used as multiplicative factor in the
+       smaller of the two spaces.
+    4. If desired, 1D arrays are merged and used on the smaller space.
+    5. Two functions are returned that implement the optimized domain
+       and range weightings. They have ``out`` parameters that let them
+       operate in-place if necessary.
+
+    Parameters
+    ----------
+    adj_domain, adj_range : `TensorSpace`
+        Domain and range of an adjoint operator for which the weighting
+        strategy should be determined.
+    extra_factor : float, optional
+        Multiply the determined weighting constant by this value. This is for
+        situations where additional scalar corrections are necessary.
+    merge_1d_arrs : bool, optional
+        If ``True``, try to merge the 1D arrays in axes where they occur
+        in both domain and range. This is intended as an optimization for
+        operators that have identical (or compatible) domain and range,
+        at least in the axes in question.
+
+    Returns
+    -------
+    apply_dom_weighting, apply_ran_weighting : function
+        Python functions with signature ``apply(x, out=None)`` that
+        implement the optimized strategy for domain and range, returning
+        a Numpy array. 
If a function does not do anything, it returns its + input ``x`` as Numpy array without making a copy. + """ + dom_w = adj_domain.weighting + ran_w = adj_range.weighting + dom_size = adj_domain.size + ran_size = adj_range.size + const = extra_factor + + # Get relevant factors from the weightings + + # Domain weightings go in as multiplicative factors + if isinstance(dom_w, ArrayWeighting): + const *= 1.0 + dom_ndarr = dom_w.array + dom_1darrs = [] + dom_1darr_axes = [] + elif isinstance(dom_w, PerAxisWeighting): + const *= np.prod(dom_w.consts) + dom_ndarr = None + dom_1darrs = list(dom_w.arrays) + dom_1darr_axes = list(dom_w.array_axes) + elif isinstance(dom_w, ConstWeighting): + const *= dom_w.const + dom_ndarr = None + dom_1darrs = [] + dom_1darr_axes = [] + else: + raise TypeError('weighting type {} of the adjoint domain not ' + 'supported'.format(type(dom_w))) + + # Range weightings go in as reciprocal factors. For the later merging + # of constants and arrays we proceed with the reciprocal factors. + if isinstance(ran_w, ArrayWeighting): + const /= 1.0 + ran_ndarr = ran_w.array + ran_1darrs_recip = [] + ran_1darr_axes = [] + elif isinstance(ran_w, PerAxisWeighting): + const /= np.prod(ran_w.consts) + ran_ndarr = None + ran_1darrs_recip = [1 / arr for arr in ran_w.arrays] + ran_1darr_axes = list(ran_w.array_axes) + elif isinstance(ran_w, ConstWeighting): + const /= ran_w.const + ran_ndarr = None + ran_1darrs_recip = [] + ran_1darr_axes = [] + else: + raise TypeError('weighting type {} of the adjoint range not ' + 'supported'.format(type(ran_w))) + + # Put constant where it "hurts least", i.e., to the smaller space if + # both use constant weighting or if an array weighting is involved; + # if 1d arrays are in play, multiply the constant with one of them. 
+    if const == 1.0:
+        dom_const = ran_const = 1.0
+    elif dom_1darrs != []:
+        idx_smallest = np.argmin([len(arr) for arr in dom_1darrs])
+        dom_1darrs[idx_smallest] = dom_1darrs[idx_smallest] * const
+        dom_const = ran_const = 1.0
+    elif ran_1darrs_recip != []:
+        idx_smallest = np.argmin([len(arr) for arr in ran_1darrs_recip])
+        ran_1darrs_recip[idx_smallest] *= const  # has been copied already
+        dom_const = ran_const = 1.0
+    else:
+        if dom_size < ran_size:
+            dom_const = const
+            ran_const = 1.0
+        else:
+            dom_const = 1.0
+            ran_const = const
+
+    # If desired, merge 1d arrays; this requires domain and range to have
+    # the same number of axes and compatible lengths in the merged axes
+    if merge_1d_arrs:
+        if adj_domain.ndim != adj_range.ndim:
+            raise ValueError('merging only works with domain and range of '
+                             'the same number of dimensions')
+
+        for i in range(adj_domain.ndim):
+            if i in dom_1darr_axes and i in ran_1darr_axes:
+                # Translate the axis index into list positions
+                i_dom = dom_1darr_axes.index(i)
+                i_ran = ran_1darr_axes.index(i)
+                if dom_size < ran_size:
+                    dom_1darrs[i_dom] = (dom_1darrs[i_dom] *
+                                         ran_1darrs_recip[i_ran])
+                    ran_1darrs_recip.pop(i_ran)
+                    ran_1darr_axes.remove(i)
+                else:
+                    ran_1darrs_recip[i_ran] *= dom_1darrs[i_dom]  # copied
+                    dom_1darrs.pop(i_dom)
+                    dom_1darr_axes.remove(i)
+
+    # Define weighting functions and return them
+    def apply_dom_weighting(x, out=None):
+        inp = x
+        if dom_ndarr is not None:
+            # TODO: use first variant when Numpy 1.13 is minimum
+            if isinstance(x, np.ndarray):
+                out = np.multiply(x, dom_ndarr, out=out)
+            else:
+                out = x.ufuncs.multiply(dom_ndarr, out=out)
+            inp = out
+        if dom_const != 1.0:
+            if isinstance(inp, np.ndarray):
+                out = np.multiply(inp, dom_const, out=out)
+            else:
+                out = inp.ufuncs.multiply(dom_const, out=out)
+            inp = out
+        if dom_1darrs != []:
+            if out is None:
+                out = fast_1d_tensor_mult(inp, dom_1darrs, dom_1darr_axes)
+            else:
+                # TODO: use impl when available
+                with writable_array(out) as out_arr:
+                    fast_1d_tensor_mult(inp, dom_1darrs, dom_1darr_axes,
+                                        out=out_arr)
+
+        if out is None:
+            # Nothing has been done, return input as Numpy array. This will
+            # not copy.
+            return np.asarray(x)
+        else:
+            return np.asarray(out)
+
+    def apply_ran_weighting(x, out=None):
+        inp = x
+        if ran_ndarr is not None:
+            # TODO: use first variant when Numpy 1.13 is minimum
+            if isinstance(x, np.ndarray):
+                out = np.divide(x, ran_ndarr, out=out)
+            else:
+                out = x.ufuncs.divide(ran_ndarr, out=out)
+            inp = out
+        if ran_const != 1.0:
+            if isinstance(inp, np.ndarray):
+                out = np.multiply(inp, ran_const, out=out)
+            else:
+                out = inp.ufuncs.multiply(ran_const, out=out)
+            inp = out
+        if ran_1darrs_recip != []:
+            if out is None:
+                out = fast_1d_tensor_mult(inp, ran_1darrs_recip,
+                                          ran_1darr_axes)
+            else:
+                with writable_array(out) as out_arr:
+                    fast_1d_tensor_mult(inp, ran_1darrs_recip, ran_1darr_axes,
+                                        out=out_arr)
+
+        if out is None:
+            # Nothing has been done, return input as Numpy array. This will
+            # not copy. 
+ return np.asarray(x) + else: + return np.asarray(out) + + return apply_dom_weighting, apply_ran_weighting + + if __name__ == '__main__': from odl.util.testutils import run_doctests run_doctests() diff --git a/odl/test/discr/discr_mappings_test.py b/odl/test/discr/discr_mappings_test.py index 3cab6e85d80..e17b6077433 100644 --- a/odl/test/discr/discr_mappings_test.py +++ b/odl/test/discr/discr_mappings_test.py @@ -28,7 +28,7 @@ def test_nearest_interpolation_1d_complex(odl_tspace_impl): # Coordinate vectors are: # [0.1, 0.3, 0.5, 0.7, 0.9] - fspace = odl.FunctionSpace(intv, out_dtype=complex) + fspace = odl.FunctionSpace(intv, dtype_out=complex) tspace = odl.cn(part.shape) interp_op = NearestInterpolation(fspace, part, tspace) function = interp_op([0 + 1j, 1 + 2j, 2 + 3j, 3 + 4j, 4 + 5j]) @@ -131,7 +131,7 @@ def test_nearest_interpolation_2d_string(): # Coordinate vectors are: # [0.125, 0.375, 0.625, 0.875], [0.25, 0.75] - fspace = odl.FunctionSpace(rect, out_dtype='U1') + fspace = odl.FunctionSpace(rect, dtype_out='U1') tspace = odl.tensor_space(part.shape, dtype='U1') interp_op = NearestInterpolation(fspace, part, tspace) values = np.array([c for c in 'mystring']).reshape(tspace.shape) diff --git a/odl/test/discr/lp_discr_test.py b/odl/test/discr/lp_discr_test.py index b97a63b57df..286afe972b8 100644 --- a/odl/test/discr/lp_discr_test.py +++ b/odl/test/discr/lp_discr_test.py @@ -15,7 +15,7 @@ from odl.discr.lp_discr import DiscreteLp, DiscreteLpElement from odl.space.base_tensors import TensorSpace from odl.space.npy_tensors import NumpyTensor -from odl.space.weighting import ConstWeighting +from odl.space.weighting import PerAxisWeighting from odl.util.testutils import ( all_equal, all_almost_equal, noise_elements, simple_fixture) @@ -30,6 +30,9 @@ power = simple_fixture('power', [1.0, 2.0, 0.5, -0.5, -1.0, -2.0]) shape = simple_fixture('shape', [(2, 3, 4), (3, 4), (2,), (1,), (1, 1, 1)]) power = simple_fixture('power', [1.0, 2.0, 0.5, -0.5, -1.0, -2.0]) +shaped_dtype = simple_fixture( + 'shaped_dtype', + [('float', ()), ('float32', (3,)), ('float', (2, 3))]) # --- DiscreteLp --- # @@ -62,7 +65,7 @@ def test_discretelp_init(): # Complex space fspace_c = odl.FunctionSpace(odl.IntervalProd([0, 0], [1, 1]), - out_dtype=complex) + dtype_out=complex) tspace_c = odl.cn(part.shape) discr = DiscreteLp(fspace_c, part, tspace_c) assert discr.is_complex @@ -86,28 +89,10 @@ def test_discretelp_init(): DiscreteLp(fspace, part_diffshp, tspace) # shape mismatch -def test_empty(): - """Check if empty spaces behave as expected and all methods work.""" - discr = odl.uniform_discr([], [], ()) - - assert discr.interp == 'nearest' - assert discr.axis_labels == () - assert discr.tangent_bundle == odl.ProductSpace(field=odl.RealNumbers()) - assert discr.complex_space == odl.uniform_discr([], [], (), dtype=complex) - hash(discr) - assert repr(discr) != '' - - elem = discr.element(1.0) - assert np.array_equal(elem.asarray(), 1.0) - assert np.array_equal(elem.real, 1.0) - assert np.array_equal(elem.imag, 0.0) - assert np.array_equal(elem.conj(), 1.0) - - # --- uniform_discr --- # -def test_factory_dtypes(odl_tspace_impl): +def test_uniform_discr_dtypes(odl_tspace_impl): impl = odl_tspace_impl real_float_dtypes = [np.float32, np.float64] nonfloat_dtypes = [np.int8, np.int16, np.int32, np.int64, @@ -195,6 +180,66 @@ def test_uniform_discr_init_complex(odl_tspace_impl): assert discr.dtype == discr.tspace.default_dtype(odl.ComplexNumbers()) +def test_uniform_discr_empty(): + """Check if empty spaces behave as 
expected and all methods work.""" + discr = odl.uniform_discr([], [], ()) + + assert discr.interp == 'nearest' + assert discr.axis_labels == () + assert discr.tangent_bundle == odl.ProductSpace(field=odl.RealNumbers()) + assert discr.complex_space == odl.uniform_discr([], [], (), dtype=complex) + assert hash(discr) == hash(discr) + assert repr(discr) != '' + + elem = discr.element(1.0) + assert np.array_equal(elem.asarray(), 1.0) + assert np.array_equal(elem.real, 1.0) + assert np.array_equal(elem.imag, 0.0) + assert np.array_equal(elem.conj(), 1.0) + + +def test_uniform_discr_shaped_dtypes(shaped_dtype): + """Check handling of dtypes with shape in uniform_discr.""" + discr = odl.uniform_discr([0, 0], [1, 1], shape=(3, 4), dtype=shaped_dtype) + + # Check space shape + shaped_dtype = np.dtype(shaped_dtype) + assert discr.dtype == shaped_dtype + assert discr.scalar_dtype == shaped_dtype.base + assert discr.shape == shaped_dtype.shape + (3, 4) + + # Create new element + x = discr.zero() + assert x.shape == shaped_dtype.shape + (3, 4) + assert x in discr + assert x.tensor in discr.tspace + + # Create element from array + x = discr.element(np.ones(shaped_dtype.shape + (3, 4))) + assert x in discr + + # Create element from function + def f_scal(x): + return x[0] ** 2 + 1 + + def f_vec(x): + return [x[1] + 2, 1, x[0] + x[1]] + + def f_tens(x): + return [[0, x[0], x[1]], + [x[0] - x[1] ** 2, 1, x[1]]] + + if shaped_dtype.shape == (): + x = discr.element(f_scal) + elif shaped_dtype.shape == (3,): + x = discr.element(f_vec) + elif shaped_dtype.shape == (2, 3): + x = discr.element(f_tens) + else: + assert False + + assert x in discr + # --- DiscreteLp methods --- # @@ -211,7 +256,7 @@ def test_discretelp_element(): # 2D discr = odl.uniform_discr([0, 0], [1, 1], (3, 3)) - weight = 1.0 if exponent == float('inf') else discr.cell_volume + weight = 1.0 if exponent == float('inf') else discr.cell_sides tspace = odl.rn((3, 3), weighting=weight) elem = discr.element() assert elem in discr @@ -550,13 +595,13 @@ def test_getslice(): discr = odl.uniform_discr(0, 1, 3) elem = discr.element([1, 2, 3]) - assert isinstance(elem[:], NumpyTensor) + assert isinstance(elem[:], DiscreteLpElement) assert all_equal(elem[:], [1, 2, 3]) discr = odl.uniform_discr(0, 1, 3, dtype='complex') elem = discr.element([1 + 2j, 2 - 2j, 3]) - assert isinstance(elem[:], NumpyTensor) + assert isinstance(elem[:], DiscreteLpElement) assert all_equal(elem[:], [1 + 2j, 2 - 2j, 3]) @@ -635,15 +680,15 @@ def test_setitem_nd(): elem[:] = arr.T # bad shape (2, 3) # nD - shape = (3,) * 3 + (4,) * 3 - discr = odl.uniform_discr([0] * 6, [1] * 6, shape) + shape = (3, 4, 5) + discr = odl.uniform_discr([0] * 3, [1] * 3, shape) size = np.prod(shape) elem = discr.element(np.zeros(shape)) arr = np.arange(size).reshape(shape) elem[:] = arr - assert all_equal(elem, arr) + assert np.array_equal(elem, arr) elem[:] = 0 assert all_equal(elem, np.zeros(elem.shape)) @@ -1053,13 +1098,13 @@ def test_ufunc_corner_cases(odl_tspace_impl): res = x.__array_ufunc__(np.add, 'reduce', x, axis=(0, 1)) assert res == pytest.approx(np.add.reduce(x.asarray(), axis=(0, 1))) - # Constant weighting should be preserved (recomputed from cell - # volume) + # Per-axis weighting should be sliced accordingly y = space.one() res = y.__array_ufunc__(np.add, 'reduce', y, axis=0) - assert res.space.weighting.const == pytest.approx(space.cell_sides[1]) + assert isinstance(res.space.weighting, PerAxisWeighting) + assert res.space.weighting.factors == pytest.approx([space.cell_sides[1]]) - 
# Check that `exponent` is propagated + # Check that `exponent` is being propagated space_1 = odl.uniform_discr([0, 0], [1, 1], (2, 3), impl=impl, exponent=1) z = space_1.one() @@ -1069,21 +1114,34 @@ def test_ufunc_corner_cases(odl_tspace_impl): # --- `outer` --- # # Check that weightings are propagated correctly + # Same space -> concatenate factors x = y = space.one() res = x.__array_ufunc__(np.add, 'outer', x, y) - assert isinstance(res.space.weighting, ConstWeighting) - assert res.space.weighting.const == pytest.approx(x.space.weighting.const * - y.space.weighting.const) + assert isinstance(res.space.weighting, PerAxisWeighting) + true_factors = x.space.weighting.factors * 2 + assert res.space.weighting.factors == pytest.approx(true_factors) + # Other space 2D and const weighting != 1 -> const weighting 1 x = space.one() - y = space_no_w.one() + y = odl.uniform_discr([0, 0], [1, 1], (2, 3), weighting=0.5).one() res = x.__array_ufunc__(np.add, 'outer', x, y) - assert isinstance(res.space.weighting, ConstWeighting) - assert res.space.weighting.const == pytest.approx(x.space.weighting.const) + assert not res.space.is_weighted - x = y = space_no_w.one() + # Other space 2D and const weighting == 1 -> concatenate with per-axis 1 + x = space.one() + y = odl.uniform_discr([0, 0], [1, 1], (2, 3), weighting=1).one() res = x.__array_ufunc__(np.add, 'outer', x, y) - assert not res.space.is_weighted + assert isinstance(res.space.weighting, PerAxisWeighting) + true_factors = x.space.weighting.factors + (1, 1) + assert res.space.weighting.factors == pytest.approx(true_factors) + + # Other space 2D and per-axis weighting -> concatenate + x = space.one() + y = odl.uniform_discr([0, 0], [1, 1], (2, 3), weighting=(0.5, 1.5)).one() + res = x.__array_ufunc__(np.add, 'outer', x, y) + assert isinstance(res.space.weighting, PerAxisWeighting) + true_factors = x.space.weighting.factors + y.space.weighting.factors + assert res.space.weighting.factors == pytest.approx(true_factors) def test_real_imag(odl_tspace_impl, odl_elem_order): @@ -1277,7 +1335,7 @@ def test_norm_rectangle_boundary(odl_tspace_impl, exponent): dtype = 'float32' rect = odl.IntervalProd([-1, -2], [1, 2]) - fspace = odl.FunctionSpace(rect, out_dtype=dtype) + fspace = odl.FunctionSpace(rect, dtype_out=dtype) # Standard case discr = odl.uniform_discr_fromspace(fspace, (4, 8), impl=impl, diff --git a/odl/test/operator/tensor_ops_test.py b/odl/test/operator/tensor_ops_test.py index 415b2515cfe..faf12c3603a 100644 --- a/odl/test/operator/tensor_ops_test.py +++ b/odl/test/operator/tensor_ops_test.py @@ -17,7 +17,7 @@ from odl.operator.tensor_ops import ( PointwiseNorm, PointwiseInner, PointwiseSum, MatrixOperator) from odl.space.pspace import ProductSpace -from odl.util import moveaxis +from odl.util.npy_compat import moveaxis from odl.util.testutils import ( all_almost_equal, all_equal, simple_fixture, noise_element, noise_elements) diff --git a/odl/test/set/sets_test.py b/odl/test/set/sets_test.py index a8b1288d476..138f78a3b86 100644 --- a/odl/test/set/sets_test.py +++ b/odl/test/set/sets_test.py @@ -168,10 +168,10 @@ def test_integers(): assert -1 in Z assert 1 in Z assert 0 in Z + assert -44 in Z - assert -1.0 not in Z - assert 1.0 not in Z - assert 0.0 not in Z + assert 2.0 not in Z + assert -1.1 not in Z assert 2j not in Z assert 2 + 2j not in Z assert 'a' not in Z diff --git a/odl/test/solvers/functional/default_functionals_test.py b/odl/test/solvers/functional/default_functionals_test.py index ab15771402c..8fe48fc32eb 100644 --- 
a/odl/test/solvers/functional/default_functionals_test.py +++ b/odl/test/solvers/functional/default_functionals_test.py @@ -14,6 +14,7 @@ import pytest import odl +from odl.util import npy_erroroptions from odl.util.testutils import all_almost_equal, noise_element, simple_fixture from odl.solvers.functional.default_functionals import ( KullbackLeiblerConvexConj, KullbackLeiblerCrossEntropyConvexConj) @@ -343,11 +344,10 @@ def test_kullback_leibler(space): assert cc_cc_func(x) == pytest.approx(func(x)) -def test_kullback_leibler_cross_entorpy(space): +def test_kullback_leibler_cross_entropy(space): """Test the kullback leibler cross entropy and its convex conjugate.""" # The prior needs to be positive - prior = noise_element(space) - prior = space.element(np.abs(prior)) + prior = noise_element(space).ufuncs.absolute() + 0.1 func = odl.solvers.KullbackLeiblerCrossEntropy(space, prior) @@ -397,8 +397,9 @@ def test_kullback_leibler_cross_entorpy(space): assert all_almost_equal(cc_func.gradient(x), expected_result) # The proximal of the convex conjugate - expected_result = (x - - scipy.special.lambertw(sigma * prior * np.exp(x)).real) + with npy_erroroptions(complex='ignore'): + expected_result = ( + x - scipy.special.lambertw(sigma * prior * np.exp(x)).real) assert all_almost_equal(cc_func.proximal(sigma)(x), expected_result) # The biconjugate, which is the functional itself since it is proper, diff --git a/odl/test/space/fspace_test.py b/odl/test/space/fspace_test.py index c8996829c50..63de0265f6a 100644 --- a/odl/test/space/fspace_test.py +++ b/odl/test/space/fspace_test.py @@ -64,11 +64,10 @@ class FuncList(list): # So we can set __name__ # --- pytest fixtures (general) --- # -out_dtype_params = ['float32', 'float64', 'complex64'] -out_dtype = simple_fixture('out_dtype', out_dtype_params, +dtype_out = simple_fixture('dtype_out', ['float32', 'float64', 'complex64'], fmt=' {name} = {value!r} ') -out_shape = simple_fixture('out_shape', [(), (2,), (2, 3)]) +shape_out = simple_fixture('shape_out', [(), (2,), (2, 3)]) domain_ndim = simple_fixture('domain_ndim', [1, 2]) vectorized = simple_fixture('vectorized', [True, False]) a = simple_fixture('a', [0.0, 1.0, -2.0]) @@ -77,10 +76,10 @@ class FuncList(list): # So we can set __name__ @pytest.fixture(scope='module') -def fspace_scal(domain_ndim, out_dtype): +def fspace_scal(domain_ndim, dtype_out): """Fixture returning a function space with given properties.""" domain = odl.IntervalProd([0] * domain_ndim, [1] * domain_ndim) - return FunctionSpace(domain, out_dtype=out_dtype) + return FunctionSpace(domain, dtype_out=dtype_out) # --- pytest fixtures (scalar test functions) --- # @@ -380,15 +379,15 @@ def test_fspace_init(): """Check if all initialization patterns work.""" intv = odl.IntervalProd(0, 1) FunctionSpace(intv) - FunctionSpace(intv, out_dtype=float) - FunctionSpace(intv, out_dtype=complex) - FunctionSpace(intv, out_dtype=(float, (2, 3))) + FunctionSpace(intv, dtype_out=float) + FunctionSpace(intv, dtype_out=complex) + FunctionSpace(intv, dtype_out=(float, (2, 3))) str3 = odl.Strings(3) - FunctionSpace(str3, out_dtype=int) + FunctionSpace(str3, dtype_out=int) # Make sure repr shows something - assert repr(FunctionSpace(intv, out_dtype=(float, (2, 3)))) + assert repr(FunctionSpace(intv, dtype_out=(float, (2, 3)))) def test_fspace_attributes(): @@ -397,9 +396,9 @@ def test_fspace_attributes(): # Scalar-valued function spaces fspace = FunctionSpace(intv) - fspace_r = FunctionSpace(intv, out_dtype=float) - fspace_c = FunctionSpace(intv, 
out_dtype=complex) - fspace_s = FunctionSpace(intv, out_dtype='U1') + fspace_r = FunctionSpace(intv, dtype_out=float) + fspace_c = FunctionSpace(intv, dtype_out=complex) + fspace_s = FunctionSpace(intv, dtype_out='U1') scalar_spaces = (fspace, fspace_r, fspace_c, fspace_s) assert fspace.domain == intv @@ -408,14 +407,14 @@ def test_fspace_attributes(): assert fspace_c.field == odl.ComplexNumbers() assert fspace_s.field is None - assert fspace.out_dtype == float - assert fspace_r.out_dtype == float - assert fspace_r.real_out_dtype == float - assert fspace_r.complex_out_dtype == complex - assert fspace_c.out_dtype == complex - assert fspace_c.real_out_dtype == float - assert fspace_c.complex_out_dtype == complex - assert fspace_s.out_dtype == np.dtype('U1') + assert fspace.dtype_out == float + assert fspace_r.dtype_out == float + assert fspace_r.real_dtype_out == float + assert fspace_r.complex_dtype_out == complex + assert fspace_c.dtype_out == complex + assert fspace_c.real_dtype_out == float + assert fspace_c.complex_dtype_out == complex + assert fspace_s.dtype_out == np.dtype('U1') assert fspace.is_real assert not fspace.is_complex assert fspace_r.is_real @@ -423,20 +422,20 @@ def test_fspace_attributes(): assert fspace_c.is_complex assert not fspace_c.is_real with pytest.raises(AttributeError): - fspace_s.real_out_dtype + fspace_s.real_dtype_out with pytest.raises(AttributeError): - fspace_s.complex_out_dtype + fspace_s.complex_dtype_out - assert all(spc.scalar_out_dtype == spc.out_dtype for spc in scalar_spaces) - assert all(spc.out_shape == () for spc in scalar_spaces) + assert all(spc.scalar_dtype_out == spc.dtype_out for spc in scalar_spaces) + assert all(spc.shape_out == () for spc in scalar_spaces) assert all(not spc.tensor_valued for spc in scalar_spaces) # Vector-valued function space - fspace_vec = FunctionSpace(intv, out_dtype=(float, (2,))) + fspace_vec = FunctionSpace(intv, dtype_out=(float, (2,))) assert fspace_vec.field == odl.RealNumbers() - assert fspace_vec.out_dtype == np.dtype((float, (2,))) - assert fspace_vec.scalar_out_dtype == float - assert fspace_vec.out_shape == (2,) + assert fspace_vec.dtype_out == np.dtype((float, (2,))) + assert fspace_vec.scalar_dtype_out == float + assert fspace_vec.shape_out == (2,) assert fspace_vec.tensor_valued @@ -445,10 +444,10 @@ def test_equals(): intv = odl.IntervalProd(0, 1) intv2 = odl.IntervalProd(-1, 1) fspace = FunctionSpace(intv) - fspace_r = FunctionSpace(intv, out_dtype=float) - fspace_c = FunctionSpace(intv, out_dtype=complex) + fspace_r = FunctionSpace(intv, dtype_out=float) + fspace_c = FunctionSpace(intv, dtype_out=complex) fspace_intv2 = FunctionSpace(intv2) - fspace_vec = FunctionSpace(intv, out_dtype=(float, (2,))) + fspace_vec = FunctionSpace(intv, dtype_out=(float, (2,))) _test_eq(fspace, fspace) _test_eq(fspace, fspace_r) @@ -460,11 +459,11 @@ def test_equals(): def test_fspace_astype(): - """Check that converting function spaces to new out_dtype works.""" + """Check that converting function spaces to new dtype_out works.""" rspace = FunctionSpace(odl.IntervalProd(0, 1)) - cspace = FunctionSpace(odl.IntervalProd(0, 1), out_dtype=complex) - rspace_s = FunctionSpace(odl.IntervalProd(0, 1), out_dtype='float32') - cspace_s = FunctionSpace(odl.IntervalProd(0, 1), out_dtype='complex64') + cspace = FunctionSpace(odl.IntervalProd(0, 1), dtype_out=complex) + rspace_s = FunctionSpace(odl.IntervalProd(0, 1), dtype_out='float32') + cspace_s = FunctionSpace(odl.IntervalProd(0, 1), dtype_out='complex64') assert 
rspace.astype('complex64') == cspace_s assert rspace.astype('complex128') == cspace @@ -489,7 +488,7 @@ def test_fspace_elem_vectorized_init(vectorized): fspace_scal = FunctionSpace(intv) fspace_scal.element(func_nd_oop, vectorized=vectorized) - fspace_vec = FunctionSpace(intv, out_dtype=(float, (2,))) + fspace_vec = FunctionSpace(intv, dtype_out=(float, (2,))) fspace_vec.element(func_vec_nd_oop, vectorized=vectorized) fspace_vec.element(func_nd_oop_seq, vectorized=vectorized) @@ -514,14 +513,14 @@ def test_fspace_scal_elem_eval(fspace_scal, func_nd): result_mesh = func_elem(mesh) assert all_almost_equal(result_points, true_values_points) assert all_almost_equal(result_mesh, true_values_mesh) - assert result_points.dtype == fspace_scal.scalar_out_dtype - assert result_mesh.dtype == fspace_scal.scalar_out_dtype + assert result_points.dtype == fspace_scal.scalar_dtype_out + assert result_mesh.dtype == fspace_scal.scalar_dtype_out assert result_points.flags.writeable assert result_mesh.flags.writeable # In place - out_points = np.empty(3, dtype=fspace_scal.scalar_out_dtype) - out_mesh = np.empty(mesh_shape, dtype=fspace_scal.scalar_out_dtype) + out_points = np.empty(3, dtype=fspace_scal.scalar_dtype_out) + out_mesh = np.empty(mesh_shape, dtype=fspace_scal.scalar_dtype_out) func_elem(points, out=out_points) func_elem(mesh, out=out_mesh) assert all_almost_equal(out_points, true_values_points) @@ -554,15 +553,15 @@ def test_fspace_scal_elem_with_param_eval(func_param_nd): assert all_almost_equal(result_mesh, true_values_mesh) # In place - out_points = np.empty(3, dtype=fspace_scal.scalar_out_dtype) - out_mesh = np.empty(mesh_shape, dtype=fspace_scal.scalar_out_dtype) + out_points = np.empty(3, dtype=fspace_scal.scalar_dtype_out) + out_mesh = np.empty(mesh_shape, dtype=fspace_scal.scalar_dtype_out) func_elem(points, out=out_points, c=2.5) func_elem(mesh, out=out_mesh, c=2.5) assert all_almost_equal(out_points, true_values_points) assert all_almost_equal(out_mesh, true_values_mesh) # Complex output - fspace_complex = FunctionSpace(intv, out_dtype=complex) + fspace_complex = FunctionSpace(intv, dtype_out=complex) true_values_points = func_ref(points, c=2j) true_values_mesh = func_ref(mesh, c=2j) @@ -574,10 +573,10 @@ def test_fspace_scal_elem_with_param_eval(func_param_nd): assert all_almost_equal(result_mesh, true_values_mesh) -def test_fspace_vec_elem_eval(func_vec_nd, out_dtype): +def test_fspace_vec_elem_eval(func_vec_nd, dtype_out): """Check evaluation of scalar-valued function elements.""" intv = odl.IntervalProd([0, 0], [1, 1]) - fspace_vec = FunctionSpace(intv, out_dtype=(float, (2,))) + fspace_vec = FunctionSpace(intv, dtype_out=(float, (2,))) points = _points(fspace_vec.domain, 3) mesh_shape = (2, 3) mesh = _meshgrid(fspace_vec.domain, mesh_shape) @@ -598,16 +597,16 @@ def test_fspace_vec_elem_eval(func_vec_nd, out_dtype): result_mesh = func_elem(mesh) assert all_almost_equal(result_points, true_values_points) assert all_almost_equal(result_mesh, true_values_mesh) - assert result_points.dtype == fspace_vec.scalar_out_dtype - assert result_mesh.dtype == fspace_vec.scalar_out_dtype + assert result_points.dtype == fspace_vec.scalar_dtype_out + assert result_mesh.dtype == fspace_vec.scalar_dtype_out assert result_points.flags.writeable assert result_mesh.flags.writeable # In place out_points = np.empty(values_points_shape, - dtype=fspace_vec.scalar_out_dtype) + dtype=fspace_vec.scalar_dtype_out) out_mesh = np.empty(values_mesh_shape, - dtype=fspace_vec.scalar_out_dtype) + 
dtype=fspace_vec.scalar_dtype_out) func_elem(points, out=out_points) func_elem(mesh, out=out_mesh) assert all_almost_equal(out_points, true_values_points) @@ -616,7 +615,7 @@ def test_fspace_vec_elem_eval(func_vec_nd, out_dtype): # Single point evaluation result_point = func_elem(point) assert all_almost_equal(result_point, true_value_point) - out_point = np.empty((2,), dtype=fspace_vec.scalar_out_dtype) + out_point = np.empty((2,), dtype=fspace_vec.scalar_dtype_out) func_elem(point, out=out_point) assert all_almost_equal(out_point, true_value_point) @@ -624,7 +623,7 @@ def test_fspace_vec_elem_eval(func_vec_nd, out_dtype): def test_fspace_tens_eval(func_tens): """Test tensor-valued function evaluation.""" intv = odl.IntervalProd([0, 0], [1, 1]) - fspace_tens = FunctionSpace(intv, out_dtype=(float, (2, 3))) + fspace_tens = FunctionSpace(intv, dtype_out=(float, (2, 3))) points = _points(fspace_tens.domain, 4) mesh_shape = (4, 5) mesh = _meshgrid(fspace_tens.domain, mesh_shape) @@ -665,7 +664,7 @@ def test_fspace_tens_eval(func_tens): def test_fspace_elem_eval_unusual_dtypes(): """Check evaluation with unusual data types (int and string).""" str3 = odl.Strings(3) - fspace = FunctionSpace(str3, out_dtype=int) + fspace = FunctionSpace(str3, dtype_out=int) strings = np.array(['aa', 'b', 'cab', 'aba']) out_vec = np.empty((4,), dtype=int) @@ -683,7 +682,7 @@ def test_fspace_elem_eval_unusual_dtypes(): def test_fspace_elem_eval_vec_1d(func_vec_1d): """Test evaluation in 1d since it's a corner case regarding shapes.""" intv = odl.IntervalProd(0, 1) - fspace_vec = FunctionSpace(intv, out_dtype=(float, (2,))) + fspace_vec = FunctionSpace(intv, dtype_out=(float, (2,))) points = _points(fspace_vec.domain, 3) mesh_shape = (4,) mesh = _meshgrid(fspace_vec.domain, mesh_shape) @@ -757,7 +756,7 @@ def test_fspace_elem_equality(): assert f_vec_dual == f_vec_dual assert f_vec_dual == f_vec_dual_2 - fspace_tens = FunctionSpace(intv, out_dtype=(float, (2, 3))) + fspace_tens = FunctionSpace(intv, dtype_out=(float, (2, 3))) f_tens_oop = fspace_tens.element(func_tens_oop) f_tens_oop2 = fspace_tens.element(func_tens_oop) @@ -782,12 +781,12 @@ def test_fspace_elem_equality(): assert f_tens_seq != f_tens_seq2 -def test_fspace_elem_assign(out_shape): +def test_fspace_elem_assign(shape_out): """Check assignment of fspace elements.""" fspace = FunctionSpace(odl.IntervalProd(0, 1), - out_dtype=(float, out_shape)) + dtype_out=(float, shape_out)) - ndim = len(out_shape) + ndim = len(shape_out) if ndim == 0: f_oop = fspace.element(func_nd_oop) f_ip = fspace.element(func_nd_ip) @@ -816,12 +815,12 @@ def test_fspace_elem_assign(out_shape): assert f_out == f_dual -def test_fspace_elem_copy(out_shape): +def test_fspace_elem_copy(shape_out): """Check copying of fspace elements.""" fspace = FunctionSpace(odl.IntervalProd(0, 1), - out_dtype=(float, out_shape)) + dtype_out=(float, shape_out)) - ndim = len(out_shape) + ndim = len(shape_out) if ndim == 0: f_oop = fspace.element(func_nd_oop) f_ip = fspace.element(func_nd_ip) @@ -847,12 +846,12 @@ def test_fspace_elem_copy(out_shape): assert f_out == f_dual -def test_fspace_elem_real_imag_conj(out_shape): +def test_fspace_elem_real_imag_conj(shape_out): """Check taking real/imaginary parts of fspace elements.""" fspace = FunctionSpace(odl.IntervalProd(0, 1), - out_dtype=(complex, out_shape)) + dtype_out=(complex, shape_out)) - ndim = len(out_shape) + ndim = len(shape_out) if ndim == 0: f_elem = fspace.element(func_complex_nd_oop) elif ndim == 1: @@ -866,8 +865,8 @@ def 
test_fspace_elem_real_imag_conj(out_shape): mesh_shape = (5,) mesh = _meshgrid(fspace.domain, mesh_shape) point = 0.5 - values_points_shape = out_shape + (4,) - values_mesh_shape = out_shape + mesh_shape + values_points_shape = shape_out + (4,) + values_mesh_shape = shape_out + mesh_shape result_points = f_elem(points) result_point = f_elem(point) @@ -908,24 +907,24 @@ def test_fspace_elem_real_imag_conj(out_shape): assert all_almost_equal(out_mesh, result_mesh.conj()) -def test_fspace_zero(out_shape): +def test_fspace_zero(shape_out): """Check zero element.""" fspace = FunctionSpace(odl.IntervalProd(0, 1), - out_dtype=(float, out_shape)) + dtype_out=(float, shape_out)) points = _points(fspace.domain, 4) mesh_shape = (5,) mesh = _meshgrid(fspace.domain, mesh_shape) point = 0.5 - values_points_shape = out_shape + (4,) - values_point_shape = out_shape - values_mesh_shape = out_shape + mesh_shape + values_points_shape = shape_out + (4,) + values_point_shape = shape_out + values_mesh_shape = shape_out + mesh_shape f_zero = fspace.zero() assert all_equal(f_zero(points), np.zeros(values_points_shape)) - if not out_shape: + if not shape_out: assert f_zero(point) == 0.0 else: assert all_equal(f_zero(point), np.zeros(values_point_shape)) @@ -941,24 +940,24 @@ def test_fspace_zero(out_shape): assert all_equal(out_mesh, np.zeros(values_mesh_shape)) -def test_fspace_one(out_shape): +def test_fspace_one(shape_out): """Check one element.""" fspace = FunctionSpace(odl.IntervalProd(0, 1), - out_dtype=(float, out_shape)) + dtype_out=(float, shape_out)) points = _points(fspace.domain, 4) mesh_shape = (5,) mesh = _meshgrid(fspace.domain, mesh_shape) point = 0.5 - values_points_shape = out_shape + (4,) - values_point_shape = out_shape - values_mesh_shape = out_shape + mesh_shape + values_points_shape = shape_out + (4,) + values_point_shape = shape_out + values_mesh_shape = shape_out + mesh_shape f_one = fspace.one() assert all_equal(f_one(points), np.ones(values_points_shape)) - if not out_shape: + if not shape_out: assert f_one(point) == 1.0 else: assert all_equal(f_one(point), np.ones(values_point_shape)) @@ -1067,16 +1066,16 @@ def test_fspace_lincomb_scalar(a, b): assert all_equal(out(points), true_result_aligned) -def test_fspace_lincomb_vec_tens(a, b, out_shape): +def test_fspace_lincomb_vec_tens(a, b, shape_out): """Check linear combination in function spaces.""" - if out_shape == (): + if shape_out == (): return intv = odl.IntervalProd([0, 0], [1, 1]) - fspace = FunctionSpace(intv, out_dtype=(float, out_shape)) + fspace = FunctionSpace(intv, dtype_out=(float, shape_out)) points = _points(fspace.domain, 4) - ndim = len(out_shape) + ndim = len(shape_out) if ndim == 1: f_elem1 = fspace.element(func_vec_nd_oop) f_elem2 = fspace.element(func_vec_nd_other) @@ -1092,7 +1091,7 @@ def test_fspace_lincomb_vec_tens(a, b, out_shape): out_func = fspace.element() fspace.lincomb(a, f_elem1, b, f_elem2, out_func) assert all_equal(out_func(points), true_result) - out_arr = np.empty(out_shape + (4,)) + out_arr = np.empty(shape_out + (4,)) out_func(points, out=out_arr) assert all_equal(out_arr, true_result) @@ -1100,14 +1099,14 @@ def test_fspace_lincomb_vec_tens(a, b, out_shape): # NOTE: multiply and divide are tested via special methods -def test_fspace_elem_power(power, out_shape): +def test_fspace_elem_power(power, shape_out): """Check taking powers of fspace elements.""" # Make sure test functions don't take negative values intv = odl.IntervalProd([1, 0], [2, 1]) - fspace = FunctionSpace(intv, out_dtype=(float, 
out_shape)) + fspace = FunctionSpace(intv, dtype_out=(float, shape_out)) points = _points(fspace.domain, 4) - ndim = len(out_shape) + ndim = len(shape_out) with np.errstate(all='ignore'): if ndim == 0: f_elem = fspace.element(func_nd_oop) @@ -1124,7 +1123,7 @@ def test_fspace_elem_power(power, out_shape): # Out-of-place power f_elem_pow = f_elem ** power assert all_almost_equal(f_elem_pow(points), true_result) - out_arr = np.empty(out_shape + (4,)) + out_arr = np.empty(shape_out + (4,)) f_elem_pow(points, out_arr) assert all_almost_equal(out_arr, true_result) @@ -1132,20 +1131,20 @@ def test_fspace_elem_power(power, out_shape): f_elem_pow = f_elem.copy() f_elem_pow **= power assert all_almost_equal(f_elem_pow(points), true_result) - out_arr = np.empty(out_shape + (4,)) + out_arr = np.empty(shape_out + (4,)) f_elem_pow(points, out_arr) assert all_almost_equal(out_arr, true_result) -def test_fspace_elem_arithmetic(odl_arithmetic_op, out_shape): +def test_fspace_elem_arithmetic(odl_arithmetic_op, shape_out): """Test arithmetic of fspace elements.""" op = odl_arithmetic_op intv = odl.IntervalProd([1, 0], [2, 1]) - fspace = FunctionSpace(intv, out_dtype=(float, out_shape)) + fspace = FunctionSpace(intv, dtype_out=(float, shape_out)) points = _points(fspace.domain, 4) - ndim = len(out_shape) + ndim = len(shape_out) if ndim == 0: f_elem1 = fspace.element(func_nd_oop) f_elem2 = fspace.element(func_nd_other) @@ -1169,8 +1168,8 @@ def test_fspace_elem_arithmetic(odl_arithmetic_op, out_shape): func_arith_scal = op(f_elem1_cpy, -2.0) assert all_almost_equal(func_arith_func(points), true_result_func) assert all_almost_equal(func_arith_scal(points), true_result_scal) - out_arr_func = np.empty(out_shape + (4,)) - out_arr_scal = np.empty(out_shape + (4,)) + out_arr_func = np.empty(shape_out + (4,)) + out_arr_scal = np.empty(shape_out + (4,)) func_arith_func(points, out=out_arr_func) func_arith_scal(points, out=out_arr_scal) assert all_almost_equal(out_arr_func, true_result_func) diff --git a/odl/test/space/pspace_test.py b/odl/test/space/pspace_test.py index ad7acd1ad51..72e2ffc9759 100644 --- a/odl/test/space/pspace_test.py +++ b/odl/test/space/pspace_test.py @@ -12,6 +12,7 @@ import operator import odl +from odl.util import npy_erroroptions from odl.util.testutils import ( all_equal, all_almost_equal, noise_elements, noise_element, simple_fixture) @@ -991,48 +992,50 @@ def test_real_imag_and_conj(): def test_real_setter_product_space(space, newpart): """Verify that the setter for the real part of an element works.""" x = noise_element(space) - x.real = newpart - try: - # Catch the scalar - iter(newpart) - except TypeError: - expected_result = newpart * space.one() - else: - if newpart in space: - expected_result = newpart.real - elif np.shape(newpart) == (3,): - expected_result = [newpart, newpart] + with npy_erroroptions(complex='ignore'): + x.real = newpart + try: + iter(newpart) + except TypeError: + # Scalar case + expected_real = newpart.real * space.real_space.one() else: - expected_result = newpart + if newpart in space: + expected_real = newpart.real + elif np.shape(newpart) == (3,): + # Broadcasting + expected_real = [newpart, newpart] + else: + expected_real = newpart assert x in space - assert all_equal(x.real, expected_result) + assert all_equal(x.real, expected_real) def test_imag_setter_product_space(space, newpart): """Verify that the setter for the imaginary part of an element works.""" x = noise_element(space) - x.imag = newpart - try: - # Catch the scalar - iter(newpart) - except 
TypeError: - expected_result = newpart * space.one() - else: - if newpart in space: - # The imaginary part is by definition real, and thus the new - # imaginary part is thus the real part of the element we try to set - # the value to - expected_result = newpart.real - elif np.shape(newpart) == (3,): - expected_result = [newpart, newpart] + with npy_erroroptions(complex='ignore'): + x.imag = newpart + try: + iter(newpart) + except TypeError: + # Scalar case + expected_imag = newpart.real * space.real_space.one() else: - expected_result = newpart + # For complex RHS, the *imaginary* part will be thrown away + if newpart in space: + expected_imag = newpart.real + elif np.shape(newpart) == (3,): + # Broadcasting + expected_imag = [newpart, newpart] + else: + expected_imag = newpart assert x in space - assert all_equal(x.imag, expected_result) + assert all_equal(x.imag, expected_imag) if __name__ == '__main__': diff --git a/odl/test/space/tensors_test.py b/odl/test/space/tensors_test.py index a5e20976b88..323932a38b4 100644 --- a/odl/test/space/tensors_test.py +++ b/odl/test/space/tensors_test.py @@ -22,6 +22,7 @@ NumpyTensorSpaceConstWeighting, NumpyTensorSpaceArrayWeighting, NumpyTensorSpaceCustomInner, NumpyTensorSpaceCustomNorm, NumpyTensorSpaceCustomDist) +from odl.space.weighting import PerAxisWeighting from odl.util.testutils import ( all_almost_equal, all_equal, simple_fixture, noise_array, noise_element, noise_elements) @@ -185,7 +186,7 @@ def test_init_tspace_weighting(weight, exponent, odl_tspace_impl): # Errors for bad input with pytest.raises(ValueError): - badly_sized = np.ones((2, 4)) + badly_sized = np.ones((4, 4)) odl.tensor_space((3, 4), weighting=badly_sized, impl=impl) if impl == 'numpy': @@ -741,7 +742,6 @@ def test_element_getitem(odl_tspace_impl, getitem_indices): assert sliced_spc.shape == sliced_shape assert sliced_spc.dtype == space.dtype assert sliced_spc.exponent == space.exponent - assert sliced_spc.weighting == space.weighting # Check that we have a view that manipulates the original array # (or not, depending on indexing style) @@ -796,7 +796,9 @@ def test_element_getitem_bool_array(odl_tspace_impl): assert sliced_spc.shape == x_arr_sliced.shape assert sliced_spc.dtype == space.dtype assert sliced_spc.exponent == space.exponent - assert sliced_spc.weighting == space.weighting + + +# TODO: test for weight propagation def test_element_setitem_bool_array(odl_tspace_impl): @@ -1069,44 +1071,6 @@ def test_array_weighting_equals(odl_tspace_impl): assert weighting_arr != weighting_other_exp -def test_array_weighting_equiv(odl_tspace_impl): - """Test the equiv method of Numpy array weightings.""" - impl = odl_tspace_impl - space = odl.rn(5, impl=impl) - weight_arr = _pos_array(space) - weight_elem = space.element(weight_arr) - different_arr = weight_arr + 1 - - arr_weighting_cls = _weighting_cls(impl, 'array') - w_arr = arr_weighting_cls(weight_arr) - w_elem = arr_weighting_cls(weight_elem) - w_different_arr = arr_weighting_cls(different_arr) - - # Equal -> True - assert w_arr.equiv(w_arr) - assert w_arr.equiv(w_elem) - # Different array -> False - assert not w_arr.equiv(w_different_arr) - - # Test shortcuts in the implementation - const_arr = np.ones(space.shape) * 1.5 - - const_weighting_cls = _weighting_cls(impl, 'const') - w_const_arr = arr_weighting_cls(const_arr) - w_const = const_weighting_cls(1.5) - w_wrong_const = const_weighting_cls(1) - w_wrong_exp = const_weighting_cls(1.5, exponent=1) - - assert w_const_arr.equiv(w_const) - assert not 
w_const_arr.equiv(w_wrong_const) - assert not w_const_arr.equiv(w_wrong_exp) - - # Bogus input - assert not w_const_arr.equiv(True) - assert not w_const_arr.equiv(object) - assert not w_const_arr.equiv(None) - - def test_array_weighting_inner(tspace): """Test inner product in a weighted space.""" [xarr, yarr], [x, y] = noise_elements(tspace, 2) @@ -1131,13 +1095,12 @@ def test_array_weighting_norm(tspace, exponent): weighting = NumpyTensorSpaceArrayWeighting(weight_arr, exponent=exponent) if exponent == float('inf'): - true_norm = np.linalg.norm( - (weight_arr * xarr).ravel(), - ord=float('inf')) + true_norm = np.linalg.norm(xarr.ravel(), ord=float('inf')) else: true_norm = np.linalg.norm( (weight_arr ** (1 / exponent) * xarr).ravel(), - ord=exponent) + ord=exponent + ) assert weighting.norm(x) == pytest.approx(true_norm, rel=rtol) @@ -1151,9 +1114,7 @@ def test_array_weighting_dist(tspace, exponent): weighting = NumpyTensorSpaceArrayWeighting(weight_arr, exponent=exponent) if exponent == float('inf'): - true_dist = np.linalg.norm( - (weight_arr * (xarr - yarr)).ravel(), - ord=float('inf')) + true_dist = np.linalg.norm((xarr - yarr).ravel(), ord=float('inf')) else: true_dist = np.linalg.norm( (weight_arr ** (1 / exponent) * (xarr - yarr)).ravel(), @@ -1180,7 +1141,7 @@ def test_const_weighting_init(odl_tspace_impl, exponent): def test_const_weighting_comparison(odl_tspace_impl): - """Test equality to and equivalence with const weightings.""" + """Test equality with const weightings.""" impl = odl_tspace_impl constant = 1.5 @@ -1200,23 +1161,12 @@ def test_const_weighting_comparison(odl_tspace_impl): assert w_const == w_const assert w_const == w_const2 assert w_const2 == w_const - # Different but equivalent - assert w_const.equiv(w_const_arr) assert w_const != w_const_arr - # Not equivalent - assert not w_const.equiv(w_other_exp) assert w_const != w_other_exp - assert not w_const.equiv(w_other_const) assert w_const != w_other_const - assert not w_const.equiv(w_other_const_arr) assert w_const != w_other_const_arr - # Bogus input - assert not w_const.equiv(True) - assert not w_const.equiv(object) - assert not w_const.equiv(None) - def test_const_weighting_inner(tspace): """Test inner product with const weighting.""" @@ -1240,7 +1190,7 @@ def test_const_weighting_norm(tspace, exponent): constant = 1.5 if exponent == float('inf'): - factor = constant + factor = 1.0 else: factor = constant ** (1 / exponent) true_norm = factor * np.linalg.norm(xarr.ravel(), ord=exponent) @@ -1255,7 +1205,7 @@ def test_const_weighting_dist(tspace, exponent): constant = 1.5 if exponent == float('inf'): - factor = constant + factor = 1.0 else: factor = constant ** (1 / exponent) true_dist = factor * np.linalg.norm((xarr - yarr).ravel(), ord=exponent) @@ -1626,17 +1576,14 @@ def test_ufunc_corner_cases(odl_tspace_impl): res = x.__array_ufunc__(np.add, 'reduce', x, axis=(0, 1)) assert res == pytest.approx(np.add.reduce(x.asarray(), axis=(0, 1))) - # Cannot propagate weightings in a meaningful way, check that there are - # none in the result - y = space_const_w.one() - res = y.__array_ufunc__(np.add, 'reduce', y, axis=0) - assert not res.space.is_weighted - y = space_arr_w.one() + # Per-axis weighting should be sliced accordingly + y = odl.rn((2, 3), weighting=(3, 4)).one() res = y.__array_ufunc__(np.add, 'reduce', y, axis=0) - assert not res.space.is_weighted + assert isinstance(res.space.weighting, PerAxisWeighting) + assert res.space.weighting.factors == pytest.approx([4]) - # Check that `exponent` is propagated
- space_1 = odl.rn((2, 3), exponent=1) + # Check that `exponent` is being propagated + space_1 = odl.rn((2, 3), impl=impl, exponent=1) z = space_1.one() res = z.__array_ufunc__(np.add, 'reduce', z, axis=0) assert res.space.exponent == 1 diff --git a/odl/test/trafos/backends/pyfftw_bindings_test.py b/odl/test/trafos/backends/pyfftw_bindings_test.py index d380ea280af..546d43c1ee5 100644 --- a/odl/test/trafos/backends/pyfftw_bindings_test.py +++ b/odl/test/trafos/backends/pyfftw_bindings_test.py @@ -72,17 +72,16 @@ def test_pyfftw_call_forward(odl_floating_dtype): if dtype == np.dtype('float16'): # not supported, skipping return - halfcomplex, out_dtype = _params_from_dtype(dtype) - + halfcomplex, dtype_out = _params_from_dtype(dtype) for shape in [(10,), (3, 4, 5)]: arr = _random_array(shape, dtype) if halfcomplex: true_dft = np.fft.rfftn(arr) - dft_arr = np.empty(_halfcomplex_shape(shape), dtype=out_dtype) + dft_arr = np.empty(_halfcomplex_shape(shape), dtype=dtype_out) else: true_dft = np.fft.fftn(arr) - dft_arr = np.empty(shape, dtype=out_dtype) + dft_arr = np.empty(shape, dtype=dtype_out) pyfftw_call(arr, dft_arr, direction='forward', halfcomplex=halfcomplex, preserve_input=False) @@ -292,19 +291,18 @@ def test_pyfftw_call_forward_with_axes(odl_floating_dtype): if dtype == np.dtype('float16'): # not supported, skipping return - halfcomplex, out_dtype = _params_from_dtype(dtype) + halfcomplex, dtype_out = _params_from_dtype(dtype) shape = (3, 4, 5) - test_axes = [(0, 1), [1], (-1,), (1, 0), (-1, -2, -3)] for axes in test_axes: arr = _random_array(shape, dtype) if halfcomplex: true_dft = np.fft.rfftn(arr, axes=axes) dft_arr = np.empty(_halfcomplex_shape(shape, axes), - dtype=out_dtype) + dtype=dtype_out) else: true_dft = np.fft.fftn(arr, axes=axes) - dft_arr = np.empty(shape, dtype=out_dtype) + dft_arr = np.empty(shape, dtype=dtype_out) pyfftw_call(arr, dft_arr, direction='forward', axes=axes, halfcomplex=halfcomplex) diff --git a/odl/test/trafos/backends/pywt_bindings_test.py b/odl/test/trafos/backends/pywt_bindings_test.py index 1b3eb9639e7..dbd8b066d79 100644 --- a/odl/test/trafos/backends/pywt_bindings_test.py +++ b/odl/test/trafos/backends/pywt_bindings_test.py @@ -22,6 +22,7 @@ pywt_flat_array_from_coeffs, pywt_coeffs_from_flat_array, pywt_single_level_decomp, pywt_multi_level_decomp, pywt_multi_level_recon) +from odl.util import is_int from odl.util.testutils import (all_almost_equal, all_equal, noise_array, simple_fixture) @@ -111,14 +112,17 @@ def _grouped_and_flat_arrays(shapes, dtype): i.e. the array with shape ``shapes[0]`` appears once, while the others appear ``2 ** ndim - 1`` times each. 
""" - space = odl.discr_sequence_space(shape=shapes[0], dtype=dtype) + shapes = [[int(shape)] if is_int(shape) else shape + for shape in shapes] + space = odl.uniform_discr([0] * len(shapes[0]), shapes[0], shapes[0], + dtype=dtype) array = noise_array(space).reshape(space.shape) grouped_list = [array] flat_list = [array.ravel()] ndim = space.ndim for shape in shapes[1:]: - space = odl.discr_sequence_space(shape=shape, dtype=dtype) + space = odl.uniform_discr([0] * len(shape), shape, shape, dtype=dtype) arrays = [noise_array(space).reshape(shape) for _ in range(2 ** ndim - 1)] grouped_list.append(tuple(arrays)) diff --git a/odl/test/trafos/fourier_test.py b/odl/test/trafos/fourier_test.py index c6a7f095f7c..bc0dddbdc11 100644 --- a/odl/test/trafos/fourier_test.py +++ b/odl/test/trafos/fourier_test.py @@ -53,12 +53,12 @@ def sinc(x): def test_dft_init(impl): # Just check if the code runs at all shape = (4, 5) - dom = odl.discr_sequence_space(shape) + dom = odl.uniform_discr([0, 0], shape, shape) dom_nonseq = odl.uniform_discr([0, 0], [1, 1], shape) - dom_f32 = odl.discr_sequence_space(shape, dtype='float32') - ran = odl.discr_sequence_space(shape, dtype='complex128') - ran_c64 = odl.discr_sequence_space(shape, dtype='complex64') - ran_hc = odl.discr_sequence_space((3, 5), dtype='complex128') + dom_f32 = odl.uniform_discr([0, 0], shape, shape, dtype='float32') + ran = odl.uniform_discr([0, 0], shape, shape, dtype=complex) + ran_c64 = odl.uniform_discr([0, 0], shape, shape, dtype='complex64') + ran_hc = odl.uniform_discr([0, 0], [2, 4], (3, 5), dtype=complex) # Implicit range DiscreteFourierTransform(dom, impl=impl) @@ -82,8 +82,8 @@ def test_dft_init(impl): def test_dft_init_raise(): # Test different error scenarios shape = (4, 5) - dom = odl.discr_sequence_space(shape) - dom_f32 = odl.discr_sequence_space(shape, dtype='float32') + dom = odl.uniform_discr([0, 0], shape, shape) + dom_f32 = odl.uniform_discr([0, 0], shape, shape, dtype='float32') # Bad types with pytest.raises(TypeError): @@ -103,36 +103,36 @@ def test_dft_init_raise(): DiscreteFourierTransform(dom, axes=(1, -3)) # Badly shaped range - bad_ran = odl.discr_sequence_space((3, 5), dtype='complex128') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (3, 5), dtype='complex128') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran) - bad_ran = odl.discr_sequence_space((10, 10), dtype='complex128') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (10, 10), dtype='complex128') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran) - bad_ran = odl.discr_sequence_space((4, 5), dtype='complex128') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 5), dtype='complex128') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran, halfcomplex=True) - bad_ran = odl.discr_sequence_space((4, 3), dtype='complex128') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 3), dtype='complex128') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran, halfcomplex=True, axes=(0,)) # Bad data types - bad_ran = odl.discr_sequence_space(shape, dtype='complex64') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 5), dtype='complex64') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran) - bad_ran = odl.discr_sequence_space(shape, dtype='float64') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 5), dtype='float64') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran) - bad_ran = odl.discr_sequence_space((4, 3), dtype='float64') + bad_ran = odl.uniform_discr([0, 0], [1, 
1], (4, 3), dtype='float64') with pytest.raises(ValueError): DiscreteFourierTransform(dom, bad_ran, halfcomplex=True) - bad_ran = odl.discr_sequence_space((4, 3), dtype='complex128') + bad_ran = odl.uniform_discr([0, 0], [1, 1], (4, 3), dtype='complex128') with pytest.raises(ValueError): DiscreteFourierTransform(dom_f32, bad_ran, halfcomplex=True) @@ -144,25 +144,24 @@ def test_dft_init_raise(): def test_dft_range(): # 1d shape = 10 - dom = odl.discr_sequence_space(shape, dtype='complex128') + dom = odl.uniform_discr(0, shape, shape, dtype='complex128') fft = DiscreteFourierTransform(dom) - true_ran = odl.discr_sequence_space(shape, dtype='complex128') - assert fft.range == true_ran + assert fft.range == dom # 3d shape = (3, 4, 5) - ran = odl.discr_sequence_space(shape, dtype='complex64') - fft = DiscreteFourierTransform(ran) - true_ran = odl.discr_sequence_space(shape, dtype='complex64') - assert fft.range == true_ran + dom = odl.uniform_discr([0, 0, 0], shape, shape, dtype='complex64') + fft = DiscreteFourierTransform(dom) + assert fft.range == dom # 3d, with axes and halfcomplex shape = (3, 4, 5) axes = (-1, -2) ran_shape = (3, 3, 5) - dom = odl.discr_sequence_space(shape, dtype='float32') + dom = odl.uniform_discr([0, 0, 0], shape, shape, dtype='float32') fft = DiscreteFourierTransform(dom, axes=axes, halfcomplex=True) - true_ran = odl.discr_sequence_space(ran_shape, dtype='complex64') + true_ran = odl.uniform_discr([0, 0, 0], ran_shape, ran_shape, + dtype='complex64') assert fft.range == true_ran @@ -173,10 +172,10 @@ def test_idft_init(impl): # Just check if the code runs at all; this uses the init function of # DiscreteFourierTransform, so we don't need exhaustive tests here shape = (4, 5) - ran = odl.discr_sequence_space(shape, dtype='complex128') - ran_hc = odl.discr_sequence_space(shape, dtype='float64') - dom = odl.discr_sequence_space(shape, dtype='complex128') - dom_hc = odl.discr_sequence_space((3, 5), dtype='complex128') + ran = odl.uniform_discr([0, 0], shape, shape, dtype='complex128') + ran_hc = odl.uniform_discr([0, 0], shape, shape, dtype='float64') + dom = odl.uniform_discr([0, 0], shape, shape, dtype='complex128') + dom_hc = odl.uniform_discr([0, 0], [3, 5], (3, 5), dtype='complex128') # Implicit range DiscreteFourierTransformInverse(dom, impl=impl) @@ -191,7 +190,7 @@ def test_dft_call(impl): # 2d, complex, all ones and random back & forth shape = (4, 5) - dft_dom = odl.discr_sequence_space(shape, dtype='complex64') + dft_dom = odl.uniform_discr([0, 0], shape, shape, dtype='complex64') dft = DiscreteFourierTransform(domain=dft_dom, impl=impl) idft = DiscreteFourierTransformInverse(range=dft_dom, impl=impl) @@ -225,7 +224,7 @@ def test_dft_call(impl): # 2d, halfcomplex, first axis shape = (4, 5) axes = 0 - dft_dom = odl.discr_sequence_space(shape, dtype='float32') + dft_dom = odl.uniform_discr([0, 0], shape, shape, dtype='float32') dft = DiscreteFourierTransform(domain=dft_dom, impl=impl, halfcomplex=True, axes=axes) idft = DiscreteFourierTransformInverse(range=dft_dom, impl=impl, @@ -258,7 +257,7 @@ def test_dft_sign(impl): # 2d, complex, all ones and random back & forth shape = (4, 5) - dft_dom = odl.discr_sequence_space(shape, dtype='complex64') + dft_dom = odl.uniform_discr([0, 0], shape, shape, dtype='complex64') dft_minus = DiscreteFourierTransform(domain=dft_dom, impl=impl, sign='-') dft_plus = DiscreteFourierTransform(domain=dft_dom, impl=impl, sign='+') @@ -279,7 +278,7 @@ def test_dft_sign(impl): # 2d, halfcomplex, first axis shape = (4, 5) axes = (0,) - 
dft_dom = odl.discr_sequence_space(shape, dtype='float32') + dft_dom = odl.uniform_discr([0, 0], shape, shape, dtype='float32') arr = dft_dom.element([[0, 0, 0, 0, 0], [0, 0, 1, 1, 0], [0, 0, 1, 1, 0], @@ -303,7 +302,7 @@ def test_dft_init_plan(impl): # 2d, halfcomplex, first axis shape = (4, 5) axes = 0 - dft_dom = odl.discr_sequence_space(shape, dtype='float32') + dft_dom = odl.uniform_discr([0, 0], shape, shape, dtype='float32') dft = DiscreteFourierTransform(dft_dom, impl=impl, axes=axes, halfcomplex=True) @@ -512,7 +511,7 @@ def char_interval(x): def char_interval_ft(x): return np.exp(-1j * x / 2) * sinc(x / 2) / np.sqrt(2 * np.pi) - fspace = odl.FunctionSpace(odl.IntervalProd(-2, 2), out_dtype=complex) + fspace = odl.FunctionSpace(odl.IntervalProd(-2, 2), dtype_out=complex) discr = odl.uniform_discr_fromspace(fspace, 40, impl='numpy') dft = FourierTransform(discr) diff --git a/odl/test/util/numerics_test.py b/odl/test/util/numerics_test.py index a4a3fec702a..0a20acea22f 100644 --- a/odl/test/util/numerics_test.py +++ b/odl/test/util/numerics_test.py @@ -234,6 +234,11 @@ def simple_mult_3(x, y, z): fast_1d_tensor_mult(test_arr, [x, y, z], out=out) assert all_equal(out, true_result) + # Empty array sequence results in no-op + test_arr = np.ones(shape) + out = fast_1d_tensor_mult(test_arr, []) + assert all_equal(out, test_arr) + # Different orderings test_arr = np.ones(shape) out = fast_1d_tensor_mult(test_arr, [y, x, z], axes=(1, 0, 2)) @@ -290,10 +295,6 @@ def test_fast_1d_tensor_mult_error(): with pytest.raises(TypeError): fast_1d_tensor_mult([[0, 0], [0, 0]], [x, x]) - # No 1d arrays given - with pytest.raises(ValueError): - fast_1d_tensor_mult(test_arr, []) - # Length or dimension mismatch with pytest.raises(ValueError): fast_1d_tensor_mult(test_arr, [x, y]) diff --git a/odl/tomo/backends/astra_cpu.py b/odl/tomo/backends/astra_cpu.py index 3e437eb10c5..a7e307ff272 100644 --- a/odl/tomo/backends/astra_cpu.py +++ b/odl/tomo/backends/astra_cpu.py @@ -16,6 +16,7 @@ pass from odl.discr import DiscreteLp, DiscreteLpElement +from odl.space.weighting import adjoint_weightings from odl.tomo.backends.astra_setup import ( astra_projection_geometry, astra_volume_geometry, astra_data, astra_projector, astra_algorithm) @@ -121,11 +122,11 @@ def astra_cpu_back_projector(proj_data, geometry, reco_space, out=None): Parameters ---------- proj_data : `DiscreteLpElement` - Projection data to which the back-projector is applied + Projection data to which the back-projector is applied. geometry : `Geometry` - Geometry defining the tomographic setup + Geometry defining the tomographic setup. reco_space : `DiscreteLp` - Space to which the calling operator maps + Space to which the calling operator maps. out : ``reco_space`` element, optional Element of the reconstruction space to which the result is written. If ``None``, an element in ``reco_space`` is created. @@ -170,9 +171,20 @@ def astra_cpu_back_projector(proj_data, geometry, reco_space, out=None): vol_geom = astra_volume_geometry(reco_space) proj_geom = astra_projection_geometry(geometry) + # Get adjoint weighting functions + apply_dom_weighting, apply_ran_weighting = adjoint_weightings( + proj_data.space, reco_space) + + # Weight data out-of-place. If no weighting is applied, the returned + # element is aligned with the input. + proj_data_weighted = apply_dom_weighting(proj_data) + # If a copy was made, it's already contiguous and no copy should be + # needed. Otherwise, a copy may be required. 
+ allow_copy = (proj_data_weighted is proj_data) + # Create ASTRA data structure - sino_id = astra_data(proj_geom, datatype='projection', data=proj_data, - allow_copy=True) + sino_id = astra_data(proj_geom, datatype='projection', + data=proj_data_weighted, allow_copy=allow_copy) # Create projector # TODO: implement with different schemes for angles and detector @@ -196,11 +208,8 @@ def astra_cpu_back_projector(proj_data, geometry, reco_space, out=None): # Run algorithm astra.algorithm.run(algo_id) - # Weight the adjoint by appropriate weights - scaling_factor = float(proj_data.space.weighting.const) - scaling_factor /= float(reco_space.weighting.const) - - out *= scaling_factor + # Weight output in-place + apply_ran_weighting(out, out=out) # Delete ASTRA objects astra.algorithm.delete(algo_id) diff --git a/odl/tomo/backends/astra_cuda.py b/odl/tomo/backends/astra_cuda.py index f4131e9e954..bfae2189aa8 100644 --- a/odl/tomo/backends/astra_cuda.py +++ b/odl/tomo/backends/astra_cuda.py @@ -21,6 +21,7 @@ ASTRA_CUDA_AVAILABLE = False from odl.discr import DiscreteLp +from odl.space.weighting import adjoint_weightings from odl.tomo.backends.astra_setup import ( ASTRA_VERSION, astra_projection_geometry, astra_volume_geometry, astra_projector, @@ -248,25 +249,36 @@ def call_backward(self, proj_data, out=None): else: out = self.reco_space.element() - # Copy data to GPU memory + # Get adjoint weighting functions, using an extra correction + # factor accounting for inconsistencies in certain ASTRA versions + extra_factor = astra_cuda_bp_scaling_factor( + self.proj_space, self.reco_space, self.geometry) + apply_dom_weighting, apply_ran_weighting = adjoint_weightings( + self.proj_space, self.reco_space, extra_factor) + + # Copy weighted data to GPU memory if self.geometry.ndim == 2: - astra.data2d.store(self.sino_id, proj_data.asarray()) + proj_data_arr = apply_dom_weighting(proj_data) + astra.data2d.store(self.sino_id, proj_data_arr) elif self.geometry.ndim == 3: + # Flatten along angles shape = (-1,) + self.geometry.det_partition.shape reshaped_proj_data = proj_data.asarray().reshape(shape) + # Swap angle axis to the middle, always making a copy swapped_proj_data = np.ascontiguousarray( np.swapaxes(reshaped_proj_data, 0, 1)) + # In-place since input is not aliased with `proj_data` + apply_dom_weighting(swapped_proj_data, out=swapped_proj_data) astra.data3d.store(self.sino_id, swapped_proj_data) # Run algorithm astra.algorithm.run(self.algo_id) - # Copy result to CPU memory + # Get result from cache out[:] = self.out_array - # Fix scaling to weight by pixel/voxel size - out *= astra_cuda_bp_scaling_factor( - self.proj_space, self.reco_space, self.geometry) + # Apply reco space weighting + apply_ran_weighting(out, out=out) return out @@ -361,12 +373,9 @@ def astra_cuda_bp_scaling_factor(proj_space, reco_space, geometry): # Correct in case of non-weighted spaces proj_extent = float(proj_space.partition.extent.prod()) proj_size = float(proj_space.partition.size) - proj_weighting = proj_extent / proj_size + proj_cell_volume = proj_extent / proj_size - scaling_factor *= (proj_space.weighting.const / - proj_weighting) - scaling_factor /= (reco_space.weighting.const / - reco_space.cell_volume) + scaling_factor *= reco_space.cell_volume / proj_cell_volume if parse_version(ASTRA_VERSION) < parse_version('1.8rc1'): if isinstance(geometry, Parallel2dGeometry): diff --git a/odl/tomo/backends/skimage_radon.py b/odl/tomo/backends/skimage_radon.py index a39a5dac2eb..6badb87dd86 100644 --- 
a/odl/tomo/backends/skimage_radon.py +++ b/odl/tomo/backends/skimage_radon.py @@ -17,6 +17,7 @@ SKIMAGE_AVAILABLE = False from odl.discr import uniform_discr_frompartition, uniform_partition +from odl.space.weighting import adjoint_weightings __all__ = ('skimage_radon_forward', 'skimage_radon_back_projector', 'SKIMAGE_AVAILABLE') @@ -56,8 +57,8 @@ def clamped_interpolation(skimage_range, sinogram): def interpolation_wrapper(x): x = (x[0], np.maximum(min_x, np.minimum(max_x, x[1]))) - return sinogram.interpolation(x) + return interpolation_wrapper @@ -140,25 +141,25 @@ def skimage_radon_back_projector(sinogram, geometry, range, out=None): # Only do asserts here since these are backend functions assert out in range - # Rotate back from (rows, cols) to (x, y) - backproj = iradon(skimage_sinogram.asarray().T, theta, - output_size=range.shape[0], filter=None, circle=False) - out[:] = np.rot90(backproj, -1) - - # Empirically determined value, gives correct scaling + # Extra weighting factor needed for skimage-internal reasons scaling_factor = 4.0 * float(geometry.motion_params.length) / (2 * np.pi) + proj_cell_volume = (sinogram.space.partition.extent.prod() / + sinogram.space.partition.size) + scaling_factor *= range.cell_volume / proj_cell_volume + + # Get weighting functions + apply_dom_weighting, apply_ran_weighting = adjoint_weightings( + sinogram.space, range, extra_factor=scaling_factor) - # Correct in case of non-weighted spaces - proj_extent = float(sinogram.space.partition.extent.prod()) - proj_size = float(sinogram.space.partition.size) - proj_weighting = proj_extent / proj_size + # Scale sinogram if necessary + sinogram = apply_dom_weighting(sinogram) - scaling_factor *= (sinogram.space.weighting.const / - proj_weighting) - scaling_factor /= (range.weighting.const / - range.cell_volume) + # Apply backprojection and rotate back from (rows, cols) to (x, y) + backproj = iradon(skimage_sinogram.asarray().T, theta, + output_size=range.shape[0], filter=None, circle=False) + out[:] = np.rot90(backproj, -1) - # Correctly scale the output - out *= scaling_factor + # Scale output + apply_ran_weighting(out, out=out) return out diff --git a/odl/tomo/operators/ray_trafo.py b/odl/tomo/operators/ray_trafo.py index 027f2fb11b1..2a870890c7e 100644 --- a/odl/tomo/operators/ray_trafo.py +++ b/odl/tomo/operators/ray_trafo.py @@ -17,7 +17,7 @@ from odl.space import FunctionSpace from odl.tomo.geometry import ( Geometry, Parallel2dGeometry, Parallel3dAxisGeometry) -from odl.space.weighting import ConstWeighting +from odl.space.weighting import PerAxisWeighting from odl.tomo.backends import ( ASTRA_AVAILABLE, ASTRA_CUDA_AVAILABLE, SKIMAGE_AVAILABLE, astra_supports, ASTRA_VERSION, @@ -212,28 +212,12 @@ def __init__(self, reco_space, geometry, variant, **kwargs): proj_space = kwargs.pop('proj_space', None) if proj_space is None: dtype = reco_space.dtype - proj_fspace = FunctionSpace(geometry.params, out_dtype=dtype) - - if not reco_space.is_weighted: - weighting = None - elif (isinstance(reco_space.weighting, ConstWeighting) and - np.isclose(reco_space.weighting.const, - reco_space.cell_volume)): - # Approximate cell volume - # TODO: find a way to treat angles and detector differently - # regarding weighting. While the detector should be uniformly - # discretized, the angles do not have to and often are not. - # The needed partition property is available since - # commit a551190d, but weighting is not adapted yet. 
- # See also issue #286 - extent = float(geometry.partition.extent.prod()) - size = float(geometry.partition.size) - weighting = extent / size - else: - raise NotImplementedError('unknown weighting of domain') - + proj_fspace = FunctionSpace(geometry.params, dtype_out=dtype) + # TODO: handle uniform angles with modified endpoints + # (scale projections in backproj) + proj_weighting = proj_space_weighting(reco_space, geometry) proj_tspace = reco_space.tspace_type(geometry.partition.shape, - weighting=weighting, + weighting=proj_weighting, dtype=dtype) if geometry.motion_partition.ndim == 0: @@ -545,6 +529,75 @@ def adjoint(self): return self._adjoint +def proj_space_weighting(reco_space, geometry): + """Determine a weighting for the range of a ray transform. + + If ``reco_space`` is weighted with the cell sides, and if ``geometry`` + represents an acquisition with a continuous curve or surface of motion, + then the projection space will be weighted per axis: in the angle + axis (or axes), non-uniform sampling is allowed and taken into + account, while in the detector axis (or axes), uniform sampling is + mandatory. + For a 1D angle partition, the weights are given by :: + + angle_weights[1:-1] = (angles[2:] - angles[:-2]) / 2 + + in the inner part and :: + + angle_weights[0] = angles[1] - angles[0] + angle_weights[-1] = angles[-1] - angles[-2] + + at the boundaries. This corresponds to the summed trapezoidal rule + for non-uniform nodes. + + Parameters + ---------- + reco_space : `DiscreteLp` + Reconstruction space, domain of the `RayTransform`. + geometry : `Geometry` + Acquisition geometry to be used to create the `RayTransform`. + + Returns + ------- + proj_space_weighting : `~odl.space.weighting.Weighting` + Weighting determined from the parameters. If ``reco_space.weighting`` + is a `~odl.space.weighting.PerAxisWeighting` where the weight + factors correspond to the cell sides, the resulting weighting + is also a `PerAxisWeighting`, with factors + ``(angle_weights,) + tuple(geometry.det_partition.cell_sides)``. + + Otherwise, no weighting will be applied, and ``None`` is returned.
+ """ + if (isinstance(reco_space.weighting, PerAxisWeighting) and + reco_space.weighting.array_axes == () and + np.allclose(reco_space.weighting.consts, reco_space.cell_sides) and + hasattr(geometry, 'angles')): + + angle_vecs = geometry.motion_partition.coord_vectors + angle_cell_vecs = geometry.motion_partition.cell_sizes_vecs + angles_uniform = geometry.motion_partition.is_uniform_byaxis + weights = [] + + for i in range(len(angle_vecs)): + if angles_uniform[i]: + weights.append(angle_cell_vecs[i][0]) + else: + weight = np.empty_like(angle_vecs[i]) + weight[1:-1] = (angle_vecs[i][2:] - angle_vecs[i][:-2]) / 2 + weight[0] = angle_cell_vecs[i][0] + weight[-1] = angle_cell_vecs[i][-1] + + weights.append(weight) + + weights.extend(geometry.det_partition.cell_sides) + + # Use impl-specific weighting class + return type(reco_space.weighting)(weights) + + else: + return None + + if __name__ == '__main__': from odl.util.testutils import run_doctests run_doctests() diff --git a/odl/tomo/util/utility.py b/odl/tomo/util/utility.py index d1fb21406de..db0e3d2c99e 100644 --- a/odl/tomo/util/utility.py +++ b/odl/tomo/util/utility.py @@ -484,70 +484,6 @@ def transform_system(principal_vec, principal_default, other_vecs, return tuple(transformed_vecs) -def is_rotation_matrix(mat, show_diff=False): - from scipy.linalg import det, norm - - dim = mat.shape[0] - if dim != mat.shape[1]: - return False - - determ = det(mat) - right_handed = (np.abs(determ - 1.) < 1E-10) - orthonorm_diff = mat * mat.T - np.eye(dim) - diff_norm = norm(orthonorm_diff, 2) - orthonormal = (diff_norm < 1E-10) - if not right_handed or not orthonormal: - if show_diff: - print('matrix S:\n', mat) - print('det(S): ', determ) - print('S*S.T - eye:\n', orthonorm_diff) - print('2-norm of difference: ', diff_norm) - return False - return True - - -def angles_from_matrix(rot_matrix): - if rot_matrix.shape == (2, 2): - theta = np.atan2(rot_matrix[1, 0], rot_matrix[0, 0]) - return theta, - elif rot_matrix.shape == (3, 3): - if rot_matrix[2, 2] == 1.: # cannot use last row and column - theta = 0. - # upper-left block is 2d rotation for phi + psi, so one needs - # to be fixed - psi = 0. - phi = np.atan2(rot_matrix[1, 0], rot_matrix[0, 0]) - if phi < 0: - phi += 2 * np.pi # in [0, 2pi) - else: - phi = np.atan2(rot_matrix[0, 2], -rot_matrix[1, 2]) - psi = np.atan2(rot_matrix[2, 0], rot_matrix[2, 1]) - theta = np.acos(rot_matrix[2, 2]) - - if phi < 0. or psi < 0.: - phi += np.pi - psi += np.pi - theta = -theta - - return phi, theta, psi - - else: - raise ValueError('shape of `rot_matrix` must be (2, 2) or (3, 3), ' - 'got {}'.format(rot_matrix.shape)) - - -def to_lab_sys(vec_in_local_coords, local_sys): - vec_in_local_coords = np.array(vec_in_local_coords) - trafo_matrix = np.matrix(local_sys).T - return np.dot(trafo_matrix, vec_in_local_coords) - - -def to_local_sys(vec_in_lab_coords, local_sys): - vec_in_lab_coords = np.array(vec_in_lab_coords) - trafo_matrix = np.matrix(local_sys) - return np.dot(trafo_matrix, vec_in_lab_coords) - - def perpendicular_vector(vec): """Return a vector perpendicular to ``vec``. 
diff --git a/odl/trafos/backends/pyfftw_bindings.py b/odl/trafos/backends/pyfftw_bindings.py index b96a10fe150..78a6e2be70d 100644 --- a/odl/trafos/backends/pyfftw_bindings.py +++ b/odl/trafos/backends/pyfftw_bindings.py @@ -241,30 +241,30 @@ def _pyfftw_check_args(arr_in, arr_out, axes, halfcomplex, direction): raise ValueError('duplicate axes are not allowed') if direction == 'forward': - out_shape = list(arr_in.shape) + shape_out = list(arr_in.shape) if halfcomplex: try: - out_shape[axes[-1]] = arr_in.shape[axes[-1]] // 2 + 1 + shape_out[axes[-1]] = arr_in.shape[axes[-1]] // 2 + 1 except IndexError: raise IndexError('axis index {} out of range for array ' 'with {} axes' ''.format(axes[-1], arr_in.ndim)) - if arr_out.shape != tuple(out_shape): + if arr_out.shape != tuple(shape_out): raise ValueError('expected output shape {}, got {}' - ''.format(tuple(out_shape), arr_out.shape)) + ''.format(tuple(shape_out), arr_out.shape)) if is_real_dtype(arr_in.dtype): - out_dtype = complex_dtype(arr_in.dtype) + dtype_out = complex_dtype(arr_in.dtype) elif halfcomplex: raise ValueError('cannot combine halfcomplex forward transform ' 'with complex input') else: - out_dtype = arr_in.dtype + dtype_out = arr_in.dtype - if arr_out.dtype != out_dtype: + if arr_out.dtype != dtype_out: raise ValueError('expected output dtype {}, got {}' - ''.format(dtype_repr(out_dtype), + ''.format(dtype_repr(dtype_out), dtype_repr(arr_out.dtype))) elif direction == 'backward': diff --git a/odl/trafos/fourier.py b/odl/trafos/fourier.py index 59bc7f18295..6a56641875b 100644 --- a/odl/trafos/fourier.py +++ b/odl/trafos/fourier.py @@ -11,7 +11,7 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.discr import DiscreteLp, discr_sequence_space +from odl.discr import DiscreteLp, uniform_discr from odl.operator import Operator from odl.set import RealNumbers, ComplexNumbers from odl.trafos.backends.pyfftw_bindings import ( @@ -59,8 +59,8 @@ def __init__(self, inverse, domain, range=None, axes=None, sign='-', range : `DiscreteLp`, optional Range of the Fourier transform. If not given, the range is determined from ``domain`` and the other parameters as - a `discr_sequence_space` with exponent ``p / (p - 1)`` - (read as 'inf' for p=1 and 1 for p='inf'). + a `DiscreteLp` space with exponent ``p / (p - 1)`` + (read as 'inf' for p=1 and 1 for p='inf') and unit-sized cells. axes : int or sequence of ints, optional Dimensions in which a transform is to be calculated. ``None`` means all axes. @@ -123,9 +123,9 @@ def __init__(self, inverse, domain, range=None, axes=None, sign='-', if range is None: impl = domain.tspace.impl - range = discr_sequence_space( - ran_shape, ran_dtype, impl, - exponent=conj_exponent(domain.exponent)) + range = uniform_discr( + [0] * len(ran_shape), ran_shape, ran_shape, impl=impl, + dtype=ran_dtype, exponent=conj_exponent(domain.exponent)) else: if range.shape != ran_shape: raise ValueError('expected range shape {}, got {}.' @@ -392,8 +392,8 @@ def __init__(self, domain, range=None, axes=None, sign='-', range : `DiscreteLp`, optional Range of the Fourier transform. If not given, the range is determined from ``domain`` and the other parameters as - a `discr_sequence_space` with exponent ``p / (p - 1)`` - (read as 'inf' for p=1 and 1 for p='inf'). + a `DiscreteLp` space with exponent ``p / (p - 1)`` + (read as 'inf' for p=1 and 1 for p='inf') and unit-sized cells. axes : int or sequence of ints, optional Dimensions in which a transform is to be calculated. 
``None`` means all axes. @@ -417,7 +417,7 @@ def __init__(self, domain, range=None, axes=None, sign='-', Complex-to-complex (default) transforms have the same grids in domain and range: - >>> domain = discr_sequence_space((2, 4)) + >>> domain = odl.uniform_discr([0, 0], [1, 3], (2, 4)) >>> fft = DiscreteFourierTransform(domain) >>> fft.domain.shape (2, 4) @@ -427,7 +427,8 @@ Real-to-complex transforms have a range grid with shape ``n // 2 + 1`` in the last transform axis: - >>> domain = discr_sequence_space((2, 3, 4), dtype='float') + >>> domain = odl.uniform_discr([0, 0, 0], [1, 2, 3], (2, 3, 4), + ... dtype='float') >>> axes = (0, 1) >>> fft = DiscreteFourierTransform( ... domain, halfcomplex=True, axes=axes) @@ -543,8 +544,8 @@ def __init__(self, range, domain=None, axes=None, sign='+', domain : `DiscreteLp`, optional Domain of the inverse Fourier transform. If not given, the domain is determined from ``range`` and the other parameters - as a `discr_sequence_space` with exponent ``p / (p - 1)`` - (read as 'inf' for p=1 and 1 for p='inf'). + as a `DiscreteLp` space with exponent ``p / (p - 1)`` + (read as 'inf' for p=1 and 1 for p='inf') and unit-sized cells. axes : sequence of ints, optional Dimensions in which a transform is to be calculated. `None` means all axes. @@ -568,7 +569,7 @@ Complex-to-complex (default) transforms have the same grids in domain and range: - >>> range_ = discr_sequence_space((2, 4)) + >>> range_ = odl.uniform_discr([0, 0], [1, 3], (2, 4)) >>> ifft = DiscreteFourierTransformInverse(range_) >>> ifft.domain.shape (2, 4) @@ -578,7 +579,8 @@ Complex-to-real transforms have a domain grid with shape ``n // 2 + 1`` in the last transform axis: - >>> range_ = discr_sequence_space((2, 3, 4), dtype='float') + >>> range_ = odl.uniform_discr([0, 0, 0], [1, 2, 3], (2, 3, 4), + ... dtype='float') >>> axes = (0, 1) >>> ifft = DiscreteFourierTransformInverse( ... range_, halfcomplex=True, axes=axes) diff --git a/odl/trafos/util/ft_utils.py b/odl/trafos/util/ft_utils.py index 32b9dcb2745..59ed29bfbbf 100644 --- a/odl/trafos/util/ft_utils.py +++ b/odl/trafos/util/ft_utils.py @@ -320,6 +320,9 @@ def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None): out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True) else: out = arr.copy() + elif is_real_dtype(out.dtype): + # Avoid `ComplexWarning` this way instead of silencing it + out[:] = arr.real else: out[:] = arr diff --git a/odl/ufunc_ops/ufunc_ops.py b/odl/ufunc_ops/ufunc_ops.py index af88b1f408a..cfbce897f26 100644 --- a/odl/ufunc_ops/ufunc_ops.py +++ b/odl/ufunc_ops/ufunc_ops.py @@ -439,4 +439,5 @@ def ufunc_factory(domain=RealNumbers()): if __name__ == '__main__': from odl.util.testutils import run_doctests - run_doctests() + with np.errstate(divide='ignore', invalid='ignore'): + run_doctests() diff --git a/odl/util/__init__.py b/odl/util/__init__.py index 86f9196d322..61c7093238b 100644 --- a/odl/util/__init__.py +++ b/odl/util/__init__.py @@ -18,9 +18,6 @@ from .utility import * __all__ += utility.__all__ -from .npy_compat import * -__all__ += npy_compat.__all__ - from .normalize import * __all__ += normalize.__all__ @@ -33,4 +30,5 @@ from .vectorization import * __all__ += vectorization.__all__ +from . import npy_compat from .
import ufuncs diff --git a/odl/util/normalize.py b/odl/util/normalize.py index de636fd2e13..3dbf632191c 100644 --- a/odl/util/normalize.py +++ b/odl/util/normalize.py @@ -11,10 +11,12 @@ from __future__ import print_function, division, absolute_import import numpy as np +from odl.util.utility import is_int + __all__ = ('normalized_scalar_param_list', 'normalized_index_expression', 'normalized_nodes_on_bdry', 'normalized_axes_tuple', - 'safe_int_conv') + 'normalized_axis_indices', 'safe_int_conv') def normalized_scalar_param_list(param, length, param_conv=None, @@ -154,17 +156,20 @@ Returns ------- normalized : tuple of ints or `slice`'s - Normalized index expression + Normalized index expression. Examples -------- - Sequences are turned into tuples. We can have at most as many entries - as the length of ``shape``, but fewer are allowed - the remaining + Sequences are turned into tuples, except for lists of integers, which + are taken as indices for the first axis. We can have at most as many + entries as the length of ``shape``, but fewer are allowed - the remaining list places are filled up by ``slice(None)``: >>> normalized_index_expression([1, 2, 3], shape=(3, 4, 5)) + ([1, 2, 3], slice(None, None, None), slice(None, None, None)) + >>> normalized_index_expression((1, 2, 3), shape=(3, 4, 5)) (1, 2, 3) - >>> normalized_index_expression([1, 2], shape=(3, 4, 5)) + >>> normalized_index_expression((1, 2), shape=(3, 4, 5)) (1, 2, slice(None, None, None)) >>> normalized_index_expression([slice(2), 2], shape=(3, 4, 5)) (slice(None, 2, None), 2, slice(None, None, None)) @@ -178,14 +183,14 @@ axes: >>> x = np.zeros(shape=(3, 4, 5)) - >>> idx1 = normalized_index_expression([1, 2, 3], shape=(3, 4, 5), + >>> idx1 = normalized_index_expression((1, 2, 3), shape=(3, 4, 5), ... int_to_slice=True) >>> idx1 (slice(1, 2, None), slice(2, 3, None), slice(3, 4, None)) >>> x[idx1] array([[[ 0.]]]) - >>> idx2 = normalized_index_expression([1, 2, 3], shape=(3, 4, 5), - ... int_to_slice=False) + >>> idx2 = normalized_index_expression((1, 2, 3), shape=(3, 4, 5), + ... int_to_slice=False) >>> idx2 (1, 2, 3) >>> x[idx2] @@ -194,46 +199,69 @@ ndim = len(shape) # Support indexing with fewer indices as indexing along the first # corresponding axes. In the other cases, normalize the input. - if np.isscalar(indices): + if indices is None: + indices = [None, Ellipsis] + elif np.isscalar(indices): indices = [indices, Ellipsis] - elif (isinstance(indices, slice) or indices is Ellipsis): + elif (isinstance(indices, slice) or + indices is Ellipsis or + (isinstance(indices, list) and + all(is_int(idx) for idx in indices))): + indices = [indices] + elif getattr(indices, 'dtype', object) == bool: indices = [indices] indices = list(indices) if len(indices) < ndim and Ellipsis not in indices: indices.append(Ellipsis) - # Turn Ellipsis into the correct number of slice(None) - if Ellipsis in indices: - if indices.count(Ellipsis) > 1: - raise ValueError('cannot use more than one Ellipsis.') + # Turn Ellipsis into the correct number of slice(None). We need a slightly + # more elaborate loop here, since we must avoid comparing arrays to + # something using `==`, i.e., we cannot use `indices.count()` or + # `... in indices`.
+ ell_idx = None + for i, idx in enumerate(indices): + if idx is Ellipsis: + if ell_idx is not None: + raise ValueError('cannot use more than one Ellipsis.') + else: + ell_idx = i + + # New axes don't count towards the ellipsis axes + num_newaxes = sum(idx is None for idx in indices) - eidx = indices.index(Ellipsis) - extra_dims = ndim - len(indices) + 1 - indices = (indices[:eidx] + [slice(None)] * extra_dims + - indices[eidx + 1:]) + if ell_idx is not None: + extra_dims = ndim - (len(indices) - num_newaxes) + 1 + indices = (indices[:ell_idx] + [slice(None)] * extra_dims + + indices[ell_idx + 1:]) # Turn single indices into length-1 slices if desired - for (i, idx), n in zip(enumerate(indices), shape): + dim = 0 + for i in range(len(indices)): + if indices[i] is None: + continue + + idx = indices[i] + n = shape[dim] if np.isscalar(idx): if idx < 0: idx += n if idx >= n: - raise IndexError('Index {} is out of bounds for axis ' + raise IndexError('index {} is out of bounds for axis ' '{} with size {}.' ''.format(idx, i, n)) if int_to_slice: indices[i] = slice(idx, idx + 1) + dim += 1 + # Catch most common errors if any(s.start == s.stop and s.start is not None or s.start == n for s, n in zip(indices, shape) if isinstance(s, slice)): - raise ValueError('Slices with empty axes not allowed.') - if None in indices: - raise ValueError('creating new axes is not supported.') - if len(indices) > ndim: + raise ValueError('slices with empty axes not allowed.') + if len(indices) - num_newaxes > ndim: raise IndexError('too many indices: {} > {}.' ''.format(len(indices), ndim)) @@ -303,6 +331,49 @@ return out_list +def normalized_axis_indices(indices, ndim): + """Turn the given indices into a tuple of indices in the valid range. + + This helper is intended for normalizing indices that select among + the axes of an object with ``ndim`` axes, e.g., in `DiscreteLp.byaxis`. + A slice is turned into a corresponding tuple of integers, and + a single integer is wrapped into a tuple. Negative indices are + incremented by ``ndim``. + + Parameters + ---------- + indices : slice, int or sequence of int + Object for indexing along axes. + ndim : positive int + Number of available axes determining the valid axis range. + + Returns + ------- + norm_idcs : tuple of int + The normalized indices that all satisfy ``0 <= i < ndim``. + + Raises + ------ + ValueError + If a given sequence contains non-integers. + """ + indices_in = indices + + if isinstance(indices, slice): + indices = list(range(ndim))[indices] + + try: + iter(indices) + except TypeError: + indices = [indices] + + if any(not is_int(i) for i in indices): + raise ValueError('only slice, int or sequence of int is allowed, ' + 'got {}'.format(indices_in)) + + return tuple(i + ndim if i < 0 else i for i in indices) + + def normalized_axes_tuple(axes, ndim): """Return a tuple of ``axes`` converted to positive integers. @@ -311,7 +382,7 @@ Parameters ---------- - axes : int or sequence of ints + axes : int or sequence of int Single integer or integer sequence of arbitrary length. Duplicate entries are not allowed. All entries must fulfill ``-ndim <= axis <= ndim - 1``. @@ -320,7 +391,7 @@ Returns ------- - axes_list : tuple of ints + axes_list : tuple of int The converted tuple of axes.
Examples diff --git a/odl/util/numerics.py b/odl/util/numerics.py index b406b54c271..517eae024f7 100644 --- a/odl/util/numerics.py +++ b/odl/util/numerics.py @@ -11,11 +11,13 @@ from __future__ import print_function, division, absolute_import import numpy as np -from odl.util.normalize import normalized_scalar_param_list, safe_int_conv +from odl.util.normalize import ( + normalized_scalar_param_list, safe_int_conv, normalized_index_expression) +from odl.util.utility import is_int __all__ = ('apply_on_boundary', 'fast_1d_tensor_mult', 'resize_array', - 'zscore') + 'zscore', 'simulate_slicing') _SUPPORTED_RESIZE_PAD_MODES = ('constant', 'symmetric', 'periodic', @@ -226,10 +228,10 @@ def fast_1d_tensor_mult(ndarr, onedim_arrs, axes=None, out=None): if out is None: out = np.array(ndarr, copy=True) else: - out[:] = ndarr # Self-assignment is free if out is ndarr + out[:] = ndarr # Self-assignment is free if `out is ndarr` if not onedim_arrs: - raise ValueError('no 1d arrays given') + return out if axes is None: axes = list(range(out.ndim - len(onedim_arrs), out.ndim)) @@ -847,6 +849,197 @@ def zscore(arr): return arr +def simulate_slicing(shape, indices): + """Simulate slicing into a Numpy array with given indices. + + This function is intended to simulate indexing of a Numpy array + without actually performing the indexing. Its typical usage is to + determine the shape of a result space after indexing. + + The function works with + + - integers, + - `slice` objects, + - index arrays, i.e., 1D array-like objects containing integers, + + and combinations of the above. It does not work with boolean arrays + or more advanced "fancy indexing". + + Parameters + ---------- + shape : sequence of int + Number of entries per axis in the "simulated" array. + indices : index expression + Single instance or sequence of integer, `slice`, `None` and/or + `array-like` objects that should be used to simulate indexing of an + array. If index arrays are used, they must all be one-dimensional + and have the same length. See Examples for details. + + Returns + ------- + sliced_shape : tuple of int + The shape after simulated indexing. + collapsed_axes : tuple of int + The axes in ``shape`` that will be gone after indexing. + new_axes : tuple of int + The axes in ``sliced_shape`` that are the result of adding new + axes with ``None`` entries. + leftmost_index_array_axis : int or None + The axis in ``shape`` to which the leftmost index array applies, + or ``None`` if there is no index array. + + Raises + ------ + TypeError + If an invalid type of indexing object is used. This can be caught + by upstream code to switch to lazy indexing. 
+ + Examples + -------- + A single integer slices along the first axis (index does not + matter as long as it lies within the bounds): + + >>> shape = (3, 4, 5, 6) + >>> simulate_slicing(shape, 0) # arr[0] + ((4, 5, 6), (0,), (), None) + + Multiple indices slice into corresponding axes from the left: + + >>> simulate_slicing(shape, (0, 0)) # arr[0, 0] + ((5, 6), (0, 1), (), None) + >>> simulate_slicing(shape, (0, 1, 1)) # arr[0, 1, 1] + ((6,), (0, 1, 2), (), None) + + Ellipsis (``...``) and ``slice(None)`` (``:``) can be used to keep + one or several axes intact: + + >>> # arr[0, :, 1, :] + >>> simulate_slicing(shape, (0, np.s_[:], 1, np.s_[:])) + ((4, 6), (0, 2), (), None) + >>> # arr[..., 0, 0] + >>> simulate_slicing(shape, (np.s_[...], 0, 0)) + ((3, 4), (2, 3), (), None) + + With slices, parts of axes can be selected: + + >>> # arr[0, :3, 1:4, ::2] + >>> simulate_slicing(shape, (0, np.s_[:3], np.s_[1:4], np.s_[::2])) + ((3, 3, 3), (0,), (), None) + + New axes are created with ``None`` objects. Their positions in the + result of indexing with the remaining indices end up in the + returned ``new_axes``: + + >>> # arr[None, 0, None, :3, 4, None, None, ::2] + >>> simulate_slicing( + ... shape, (None, 0, None, np.s_[:3], 4, None, None, np.s_[::2])) + ((1, 1, 3, 1, 1, 3), (0, 2), (0, 1, 3, 4), None) + + Array-like objects (must all have the same 1D shape) of integers are + treated as follows: if their common length is ``n``, a new axis + of length ``n`` is created at the position of the leftmost index + array, and all further axes indexed by the arrays are collapsed + (note: there can only be one such group of 1D index arrays; this + variant is not so useful for spaces, more so for elements): + + >>> # arr[0, 0, [0, 1, 0, 2], :] + >>> simulate_slicing(shape, (0, 0, [0, 1, 0, 2], np.s_[:])) + ((4, 6), (0, 1), (), 2) + >>> # arr[:, [1, 1], [0, 2], :] + >>> simulate_slicing(shape, (np.s_[:], [1, 1], [0, 2], np.s_[:])) + ((3, 2, 6), (2,), (), 1) + >>> # arr[[2, 0], [3, 3], [0, 1], [5, 2]] + >>> simulate_slicing(shape, ([2, 0], [3, 3], [0, 1], [5, 2])) + ((2,), (1, 2, 3), (), 0) + """ + # Raising in the case of bool indexing as a signal for upstream code. + # This is an optimization that avoids counting the number of + # `True` entries here *and* in the actual indexing operation. + if getattr(indices, 'dtype', object) == bool: + raise TypeError('cannot index space with a boolean element or ' + 'array') + + # Handle also bool array wrapped in a sequence + try: + length = len(indices) + except TypeError: + pass + else: + if length > 0 and getattr(indices[0], 'dtype', object) == bool: + raise TypeError('cannot index space with a boolean element or ' + 'array') + + indices = normalized_index_expression(indices, shape) + + # Collect new shape, collapsed axes, new axes and index array info. + # The counter `i` tracks the original axes in `shape`, i.e., it is + # incremented only when the index is not `None`. + # The counter `i_none` tracks the *final* axes, i.e., it is always + # incremented, except if the axis is being collapsed.
+ i = 0 + i_none = 0 + new_shape = [] + collapsed_axes = [] + new_axes = [] + leftmost_index_array_axis = None + index_array_len = None + for idx in indices: + if idx is None: + # Make new axis of size 1 + new_shape.append(1) + new_axes.append(i_none) + i_none += 1 + elif is_int(idx): + # Collapse axis + collapsed_axes.append(i) + elif isinstance(idx, slice): + # Use length of the axis after applying the slice + start, stop, step = idx.indices(shape[i]) + new_shape.append(int(np.ceil((stop - start) / step))) + i_none += 1 + elif (isinstance(idx, (tuple, list)) or hasattr(idx, 'ndim')): + # First we do some error checking (index arrays must be 1D and + # all have the same length), and set `leftmost_index_array_axis`. + ndim = np.ndim(idx) + if ndim != 1: + # ValueError will typically not be caught upstream (in + # contrast to TypeError), indicating a true error + raise ValueError('index arrays must be 1-dimensional, ' + 'got {}-dim. array in axis {}' + ''.format(ndim, i)) + + if leftmost_index_array_axis is None: + leftmost_index_array_axis = i + if index_array_len is None: + index_array_len = len(idx) + elif len(idx) != index_array_len: + raise ValueError('index arrays must all have the same ' + 'length, got lengths {} and {}' + ''.format(index_array_len, len(idx))) + + # The leftmost index array will result in an axis of size equal + # to the common length of all index arrays, and the other axes + # affected by index arrays will be collapsed. For example, + # `a[[0, 1], [1, 1], :]` creates an axis of size 2 at index 0 + # and collapses axis 1, leaving axis 2 intact. + if i == leftmost_index_array_axis: + new_shape.append(index_array_len) + i_none += 1 + else: + collapsed_axes.append(i) + else: + raise TypeError('got invalid element {!r} in `indices`' + ''.format(idx)) + if idx is not None: + i += 1 + + return (tuple(new_shape), + tuple(collapsed_axes), + tuple(new_axes), + leftmost_index_array_axis, + ) + + if __name__ == '__main__': from odl.util.testutils import run_doctests run_doctests() diff --git a/odl/util/pytest_plugins.py b/odl/util/pytest_plugins.py index dbac0fef1a5..48f8fa78f17 100644 --- a/odl/util/pytest_plugins.py +++ b/odl/util/pytest_plugins.py @@ -29,8 +29,10 @@ @fixture(autouse=True) -def _add_doctest_np_odl(doctest_namespace): +def odl_doctest_fixture(doctest_namespace): doctest_namespace['np'] = np + # Avoid warnings in automated doctests (ufunc_ops) + np.seterr(invalid='ignore', divide='ignore') doctest_namespace['odl'] = odl diff --git a/odl/util/testutils.py b/odl/util/testutils.py index 7d016362b95..77100c6f18d 100644 --- a/odl/util/testutils.py +++ b/odl/util/testutils.py @@ -103,6 +103,10 @@ def all_equal(iter1, iter2): if iter1 is None and iter2 is None: return True + # Do it faster for arrays + if hasattr(iter1, '__array__') and hasattr(iter2, '__array__'): + return np.array_equal(iter1, iter2) + # If one nested iterator is exhausted, go to direct comparison try: it1 = iter(iter1) @@ -130,12 +134,6 @@ def all_equal(iter1, iter2): return True -def all_almost_equal_array(v1, v2, ndigits): - return np.allclose(v1, v2, - rtol=10 ** -ndigits, atol=10 ** -ndigits, - equal_nan=True) - - def all_almost_equal(iter1, iter2, ndigits=None): """Return ``True`` if all elements in ``a`` and ``b`` are almost equal.""" try: @@ -152,7 +150,9 @@ def all_almost_equal(iter1, iter2, ndigits=None): # otherwise for recursive calls. 
diff --git a/odl/util/utility.py b/odl/util/utility.py
index 828ee0ed5d1..2349580d629 100644
--- a/odl/util/utility.py
+++ b/odl/util/utility.py
@@ -17,18 +17,19 @@
 from functools import wraps
 from future.moves.itertools import zip_longest
 from itertools import product
+import warnings
 
 import numpy as np
 
 
 __all__ = (
-    'array_str', 'dtype_str', 'dtype_repr', 'npy_printoptions',
-    'signature_string', 'signature_string_parts', 'repr_string',
-    'indent', 'dedent', 'attribute_repr_string', 'method_repr_string',
-    'is_numeric_dtype', 'is_int_dtype', 'is_floating_dtype', 'is_real_dtype',
-    'is_real_floating_dtype', 'is_complex_floating_dtype',
-    'real_dtype', 'complex_dtype', 'is_string', 'nd_iterator', 'conj_exponent',
-    'writable_array', 'run_from_ipython', 'NumpyRandomSeed',
-    'cache_arguments', 'unique',
+    'array_str', 'array_hash', 'dtype_str', 'dtype_repr', 'npy_printoptions',
+    'npy_erroroptions', 'signature_string', 'signature_string_parts',
+    'repr_string', 'indent', 'dedent', 'attribute_repr_string',
+    'method_repr_string', 'is_numeric_dtype', 'is_int_dtype',
+    'is_floating_dtype', 'is_real_dtype', 'is_real_floating_dtype',
+    'is_complex_floating_dtype', 'real_dtype', 'complex_dtype', 'is_int',
+    'is_string', 'nd_iterator', 'conj_exponent', 'writable_array',
+    'run_from_ipython', 'NumpyRandomSeed', 'cache_arguments', 'unique',
     'REPR_PRECISION')
@@ -192,6 +193,55 @@ def __exit__(self, type, value, traceback):
         np.set_printoptions(**self.orig_opts)
 
 
+class npy_erroroptions(object):
+
+    """Context manager to handle error or warning conditions.
+
+    This class extends the `numpy.errstate` context manager with a
+    ``'complex'`` parameter to handle ``ComplexWarning``.
+
+    See Also
+    --------
+    numpy.errstate
+    """
+
+    def __init__(self, **kwargs):
+        """Initialize a new instance.
+
+        Parameters
+        ----------
+        complex : {'warn', 'ignore', 'raise', 'print'}
+            Action to take in situations that by default emit a
+            `numpy.core.numeric.ComplexWarning`.
+        kwargs :
+            Other keyword arguments ``all, divide, over, under, invalid``.
+            See `numpy.seterr` for details.
+        """
+        # Pop 'complex' in any case since `numpy.seterr` does not accept
+        # it; an explicit 'complex' takes precedence over 'all'.
+        self.complex_action = kwargs.pop('complex', kwargs.get('all', 'warn'))
+        self.complex_filter = None
+        self.npy_kwargs = kwargs
+        self.npy_old_errstate = None
+
+    def __enter__(self):
+        # Translate from `numpy` to `warnings` style
+        actions = {'ignore': 'ignore', 'raise': 'error', 'print': 'always'}
+        if self.complex_action != 'warn':
+            warnings.filterwarnings(
+                actions[self.complex_action], message=r'.*',
+                category=np.ComplexWarning, module=r'.*'
+            )
+            # `filterwarnings` prepends, so our filter is the first entry
+            self.complex_filter = warnings.filters[0]
+        self.npy_old_errstate = np.seterr(**self.npy_kwargs)
+
+    def __exit__(self, type, value, traceback):
+        if self.complex_filter is not None:
+            warnings.filters.remove(self.complex_filter)
+        np.seterr(**self.npy_old_errstate)
+
+
 def array_str(a, nprint=6):
     """Stringification of an array.
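A minimal usage sketch for the new `npy_erroroptions` context manager, mirroring `npy_printoptions`; the re-export from `odl.util` is assumed here, and the float cast serves only to trigger a `ComplexWarning`:

    import numpy as np
    from odl.util import npy_erroroptions  # re-export assumed

    with npy_erroroptions(complex='ignore', invalid='raise'):
        # Discarding the imaginary part normally emits a ComplexWarning;
        # inside this block it is silenced, while invalid floating-point
        # operations (e.g. 0.0 / 0.0 in NumPy) raise FloatingPointError.
        x = np.array([1 + 2j]).astype(float)
    # On exit, the previous error state and warning filters are restored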
@@ -267,6 +317,11 @@ def array_str(a, nprint=6):
     return a_str
 
 
+def array_hash(arr):
+    """Return a hash of an array, using a conversion to bytes."""
+    # Note: only the raw bytes enter the hash; dtype and shape are ignored
+    return hash(arr.tobytes())
+
+
 def dtype_repr(dtype):
     """Stringify ``dtype`` for ``repr`` with default for int and float."""
     dtype = np.dtype(dtype)
@@ -354,6 +409,8 @@ def cache_arguments(function):
 @cache_arguments
 def is_numeric_dtype(dtype):
     """Return ``True`` if ``dtype`` is a numeric type."""
+    if dtype is None:
+        return False
     dtype = np.dtype(dtype)
     return np.issubsctype(getattr(dtype, 'base', None), np.number)
 
@@ -361,6 +418,8 @@ def is_numeric_dtype(dtype):
 @cache_arguments
 def is_int_dtype(dtype):
     """Return ``True`` if ``dtype`` is an integer type."""
+    if dtype is None:
+        return False
     dtype = np.dtype(dtype)
     return np.issubsctype(getattr(dtype, 'base', None), np.integer)
 
@@ -380,6 +439,8 @@ def is_real_dtype(dtype):
 @cache_arguments
 def is_real_floating_dtype(dtype):
     """Return ``True`` if ``dtype`` is a real floating point type."""
+    if dtype is None:
+        return False
     dtype = np.dtype(dtype)
     return np.issubsctype(getattr(dtype, 'base', None), np.floating)
 
@@ -387,6 +448,8 @@ def is_real_floating_dtype(dtype):
 @cache_arguments
 def is_complex_floating_dtype(dtype):
     """Return ``True`` if ``dtype`` is a complex floating point type."""
+    if dtype is None:
+        return False
     dtype = np.dtype(dtype)
     return np.issubsctype(getattr(dtype, 'base', None), np.complexfloating)
 
@@ -436,6 +499,9 @@ def real_dtype(dtype, default=None):
     >>> real_dtype(('complex64', (3,)))
     dtype(('<f4', (3,)))
@@ ... @@ def complex_dtype(dtype, default=None):
     >>> complex_dtype(('float32', (3,)))
     dtype(('<c8', (3,)))
@@ ... @@
+def is_int(obj):
+    """Return ``True`` if ``obj`` is a scalar integer, ``False`` else."""
+    # Fast track for the most common case
+    if isinstance(obj, int):
+        return True
+
+    if getattr(obj, 'ndim', 0) != 0:
+        # Reject non-scalar array-likes; even a size-1 integer array
+        # indexes differently from a scalar integer:
+        # np.ones((2, 3))[np.array(1)] -> array([ 1., 1., 1.])
+        # np.ones((2, 3))[np.array([1])] -> array([[ 1., 1., 1.]])
+        return False
+
+    # Semi-slow track: reject objects not castable to int
+    try:
+        int(obj)
+    except Exception:
+        return False
+
+    try:
+        # Slow track, mainly for array types that support scalars
+        arr = np.asarray(obj)
+        return np.issubsctype(arr, np.integer) and arr.shape == ()
+    except (TypeError, ValueError):
+        return False
+
+
 def is_string(obj):
     """Return ``True`` if ``obj`` behaves like a string, ``False`` else."""
+    # Shortcut for the common case
+    if isinstance(obj, str):
+        return True
+
     try:
         obj + ''
     except TypeError: