From c99614e8ea6a96e1cf05b12be50e55202fb5faad Mon Sep 17 00:00:00 2001 From: ghiggi Date: Tue, 18 Jan 2022 12:27:12 +0100 Subject: [PATCH 01/12] Add initial methods for RW --- pyresample/geometry.py | 525 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 499 insertions(+), 26 deletions(-) diff --git a/pyresample/geometry.py b/pyresample/geometry.py index 07dddc79e..61a543d4e 100644 --- a/pyresample/geometry.py +++ b/pyresample/geometry.py @@ -715,39 +715,310 @@ def copy(self): def _do_transform(src, dst, lons, lats, alt): """Run pyproj.transform and stack the results.""" x, y, z = transform(src, dst, lons, lats, alt) - return np.dstack((x, y, z)) - - def aggregate(self, **dims): - """Aggregate the current swath definition by averaging. - - For example, averaging over 2x2 windows: - `sd.aggregate(x=2, y=2)` + return np.dstack((x, y, z)) + + # TODO: this is more efficient than pyproj.transform. + # With pyproj > 3.1 it became thread safe: https://pyproj4.github.io/pyproj/stable/advanced_examples.html#multithreading + # @staticmethod + # def swath_do_transform(src, dst, lons, lats, alt): + # """Run pyproj Transformer and stack the results.""" + # from pyproj import Transformer + # transformer = Transformer.from_crs(src.crs, dst.crs) + # x, y, z = transformer.transform(lons, lats, alt, radians=False) + # return np.dstack((x, y, z)) + + def aggregate(self, x=1, y=1, **kwargs): + """Downsample the swath definition by averaging the coordinates along x and y dimension. + + Builds upon xarray.DataArray.coarsen function. + To downsample of a factor of 2, call swath_def.aggregate(x=2, y=2) + swath_def.aggregate(x=1, y=1) simply returns the current swath_def. + By default, it raise a ValueError if the dimension size is not a multiple of the window size. + This can be changed by passing boundary="trim" or boundary="pad", but behaviour within pyresample is undefined. + See https://xarray.pydata.org/en/stable/generated/xarray.DataArray.coarsen.html for further details. 
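+
+        Example (illustrative; values mirror the unit tests and are approximate)::
+
+            lons = np.array([[178.5, 179.5, -179.5, -178.5],
+                             [178.5, 179.5, -179.5, -178.5]])
+            lats = np.array([[0., 0., 0., 0.],
+                             [1., 1., 1., 1.]])
+            sd = SwathDefinition(lons, lats)
+            sd_agg = sd.aggregate(x=2, y=2)
+            # sd_agg.lons ~ [[179, -179]], sd_agg.lats ~ [[0.5, 0.5]]
+            # The averaging is performed on geocentric (x, y, z) coordinates,
+            # so coordinates crossing the antimeridian are averaged correctly.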
""" + import xarray as xr import dask.array as da import pyproj - + + # Check input validity + x = int(x) + y = int(y) + if x < 1 or y < 1: + raise ValueError('x and y arguments must be positive integers larger or equal to 1.') + + # Return SwathDefinition if nothing to aggregate + if x==1 and y==1: + return self + + # Define geodetic and geocentric projection geocent = pyproj.Proj(proj='geocent') latlong = pyproj.Proj(proj='latlong') + + # Get xr.DataArray with dask array + src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['lats','lons']) + src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['lats','lons']) + + # Conversion to Geocentric Cartesian (x,y,z) CRS res = da.map_blocks(self._do_transform, latlong, geocent, - self.lons.data, self.lats.data, - da.zeros_like(self.lons.data), new_axis=[2], - chunks=(self.lons.chunks[0], self.lons.chunks[1], 3)) - res = DataArray(res, dims=['y', 'x', 'coord'], coords=self.lons.coords) - res = res.coarsen(**dims).mean() + src_lons.data, + src_lats.data, + da.zeros_like(src_lons), # altitude + new_axis=[2], + chunks=(src_lons.chunks[0], src_lons.chunks[1], 3)) + res = xr.DataArray(res, dims=['y', 'x', 'coord'], coords=src_lons.coords) + + # Aggregating + res = res.coarsen(x=x, y=y, **kwargs).mean() + + # Back-conversion to geographic CRS lonlatalt = da.map_blocks(self._do_transform, geocent, latlong, - res[:, :, 0].data, res[:, :, 1].data, - res[:, :, 2].data, new_axis=[2], + res[:, :, 0].data, # x + res[:, :, 1].data, # y + res[:, :, 2].data, # z + new_axis=[2], chunks=res.data.chunks) - lons = DataArray(lonlatalt[:, :, 0], dims=self.lons.dims, - coords=res.coords, attrs=self.lons.attrs.copy()) - lats = DataArray(lonlatalt[:, :, 1], dims=self.lons.dims, - coords=res.coords, attrs=self.lons.attrs.copy()) - try: - resolution = lons.attrs['resolution'] * ((dims.get('x', 1) + dims.get('y', 1)) / 2) - lons.attrs['resolution'] = resolution - lats.attrs['resolution'] = resolution - except KeyError: - pass + + # Back-conversion array as input format + lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['lats','lons']) + lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['lats','lons']) + + # Add additional info if the source array is a DataArray + if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): + lats.coords = res.coords + lats.attrs = self.lats.attrs.copy() + lons.coords = res.coords + lons.attrs = self.lons.attrs.copy() + try: + resolution = lons.attrs['resolution'] * ((x + y) / 2) + lons.attrs['resolution'] = resolution + lats.attrs['resolution'] = resolution + except KeyError: + pass + + # Return the downsampled swath definition + return SwathDefinition(lons, lats) + + def upsample(self, x=1, y=1): + """Upsample the swath definition along x (along-track) and y (cross-track) dimensions. + + To upsample of a factor of 2 (each pixel splitted in 4 pixels), call swath_def.upsample(x=2, y=2). + swath_def.upsample(x=1, y=1) simply returns the current swath_def. + """ + # TODO: An alternative would be to use geotiepoints.geointerpolator.GeoInterpolator + # But I have some problem using it, see code snippet in the PR description. 
+ import dask.array as da + import xarray as xr + import numpy as np + import pyproj + from xarray.plot.utils import _infer_interval_breaks + # https://github.com/pydata/xarray/blob/main/xarray/plot/utils.py#L784 + + def _upsample_ranges_1D(x, factor=1): + ranges2D = np.linspace(x[:-1], x[1:], num=factor, endpoint=False, axis=1) + return np.concatenate((ranges2D.ravel(),[x[-1]])) + + def upsample_ranges_2D(x, factor=1, axis=0): + x = np.array(x) + if x.ndim not in [1,2]: + raise ValueError("Expects 1D or 2D array.") + if not isinstance(axis, int): + raise TypeError("'axis' must be: 0 or 1 integer.") + if axis not in [0, 1]: + raise ValueError("Expects 'axis' 0 or 1") + if not isinstance(factor, int): + raise TypeError("'factor' must be an integer equal or larger than 1.") + if factor < 1: + raise ValueError("'factor' must be an integer equal or larger than 1.") + + if x.ndim == 1: + return _upsample_ranges_1D(x, factor=factor) + else: + l_ranges = [] + if axis==1: + for i in range(x.shape[0]): + l_ranges.append(_upsample_ranges_1D(x[i,:], factor=factor)) + return np.vstack(l_ranges) + else: # axis = 0 + for i in range(x.shape[1]): + l_ranges.append(_upsample_ranges_1D(x[:,i], factor=factor)) + return np.vstack(l_ranges).transpose() + + def _upsample_corners(corners, x_factor=1, y_factor=1): + new_breaks_xx = upsample_ranges_2D(corners, factor=x_factor, axis=1) + new_corners = upsample_ranges_2D(new_breaks_xx, factor=y_factor, axis=0) + return new_corners + + def _get_corners_from_centroids(centroids): + breaks_xx = _infer_interval_breaks(centroids, axis=1) + corners = _infer_interval_breaks(breaks_xx, axis=0) + return corners + + def _get_centroids_from_corners(corners): + centroids = (corners[1:, 1:] + corners[:-1, :-1]) / 2 + return centroids + + # TODO: Decide if compute in memory or with dask + def upsample_centroids(centroid_x, centroid_y, centroid_z, x_factor=1, y_factor=1): + corners_x = _get_corners_from_centroids(centroid_x) + corners_y = _get_corners_from_centroids(centroid_y) + corners_z = _get_corners_from_centroids(centroid_z) + x_new_corners = _upsample_corners(corners_x, x_factor=x_factor, y_factor=y_factor) + y_new_corners = _upsample_corners(corners_y, x_factor=x_factor, y_factor=y_factor) + z_new_corners = _upsample_corners(corners_z, x_factor=x_factor, y_factor=y_factor) + x_new_centroids = _get_centroids_from_corners(x_new_corners) + y_new_centroids = _get_centroids_from_corners(y_new_corners) + z_new_centroids = _get_centroids_from_corners(z_new_corners) + return x_new_centroids, y_new_centroids, z_new_centroids + + def _upsample_centroid(centroid, x_factor=1, y_factor=1): + corners = _get_corners_from_centroids(centroid) + new_corners = _upsample_corners(corners, x_factor=x_factor, y_factor=y_factor) + new_centroids = _get_centroids_from_corners(new_corners) + return new_centroids + + # Define geodetic and geocentric projection + geocent = pyproj.Proj(proj='geocent') + latlong = pyproj.Proj(proj='latlong') + + # Get xr.DataArray with dask array + src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['lats','lons']) + src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['lats','lons']) + + # Conversion to Geocentric Cartesian (x,y,z) CRS + res = da.map_blocks(self._do_transform, latlong, geocent, + src_lons.data, + src_lats.data, + da.zeros_like(src_lons), # altitude + new_axis=[2], + chunks=(src_lons.chunks[0], src_lons.chunks[1], 3)) + res = xr.DataArray(res, dims=['y', 'x', 'xyz']) + + # Retrieve new 
centroids + # TODO: make it dask compatible using _upsample_centroid_dask [HELP WANTED] + # res1 = da.apply_along_axis(_upsample_centroid_dask, + # 2, + # res.data, + # x, + # y) + # res1 = xr.DataArray(res1, dims=['y', 'x', 'coord'], coords=src_lons.coords) + + res = np.stack(upsample_centroids(res[:,:,0].data, + res[:,:,1].data, + res[:,:,2].data, x_factor=x, y_factor=y), axis=2) + new_centroids = xr.DataArray(da.from_array(res), dims=['y', 'x', 'xyz']) + + # Back-conversion to geographic CRS + lonlatalt = da.map_blocks(self._do_transform, geocent, latlong, + new_centroids[:, :, 0].data, # x + new_centroids[:, :, 1].data, # y + new_centroids[:, :, 2].data, # z + new_axis=[2], + chunks=new_centroids.data.chunks) + + # Back-conversion array as input format + lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['lats','lons']) + lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['lats','lons']) + + # Add additional info if the source array is a DataArray + if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): + lats.coords = res.coords + lats.attrs = self.lats.attrs.copy() + lons.coords = res.coords + lons.attrs = self.lons.attrs.copy() + try: + resolution = lons.attrs['resolution'] * ((x + y) / 2) + lons.attrs['resolution'] = resolution + lats.attrs['resolution'] = resolution + except KeyError: + pass + + # Return the downsampled swath definition + return SwathDefinition(lons, lats) + + def extend(self, x=0, y=0): + """Extend the swath definition along x (along-track) and y (across-track) dimensions. + By default, it does not extend on any direction. + To extend of n pixel on both sides of the across-track direction, call swath_def.extend(x=0, y=2). + """ + import xarray as xr + # Check input validity + x = int(x) + y = int(y) + if x < 0 or y < 0: + raise ValueError('x and y arguments must be positive integers.') + + # Return SwathDefinition if nothing to extend + if x==0 and y==0: + return self + + # Get lats/lons numpy arrays + src_lats, src_lats_format = _convert_2D_array(self.lats, to='numpy', dims=['lats','lons']) + src_lons, src_lons_format = _convert_2D_array(self.lons, to='numpy', dims=['lats','lons']) + + dst_lats = src_lats + dst_lons = src_lons + + # Extend on y direction (side0 and side2) + if y > 0: + list_side0 = (src_lons[1,:], src_lats[1,:], src_lons[0,:], src_lats[0,:]) + list_side2 = (src_lons[-2,:], src_lats[-2,:], src_lons[-1,:], src_lats[-1,:]) + extended_side0_lonlats = _get_extended_lonlats(*list_side0, npts=y) + extended_side2_lonlats = _get_extended_lonlats(*list_side2, npts=y) + dst_lats = np.concatenate((extended_side0_lonlats[1][::-1,:], dst_lats), axis=0) + dst_lats = np.concatenate((dst_lats, extended_side2_lonlats[1]), axis=0) + dst_lons = np.concatenate((extended_side0_lonlats[0][::-1,:], dst_lons), axis=0) + dst_lons = np.concatenate((dst_lons, extended_side2_lonlats[0]), axis=0) + + # Extend on x direction (side1 and side3) + if x > 0: + list_side1 = (dst_lons[:,-2], dst_lats[:,-2], dst_lons[:,-1], dst_lats[:,-1]) + list_side3 = (dst_lons[:, 1], dst_lats[:, 1], dst_lons[:, 0], dst_lats[:, 0]) + extended_side1_lonlats = _get_extended_lonlats(*list_side1, npts=x, transpose=False) + extended_side3_lonlats = _get_extended_lonlats(*list_side3, npts=x, transpose=False) + dst_lats = np.concatenate((dst_lats, extended_side1_lonlats[1]), axis=1) + dst_lats = np.concatenate((extended_side3_lonlats[1][:,::-1], dst_lats), axis=1) + dst_lons = np.concatenate((dst_lons, extended_side1_lonlats[0]), 
axis=1) + dst_lons = np.concatenate((extended_side3_lonlats[0][:, ::-1], dst_lons), axis=1) + + # Back-conversion array as input format + lons, _ = _convert_2D_array(dst_lons, to=src_lons_format, dims=['lats','lons']) + lats, _ = _convert_2D_array(dst_lats, to=src_lats_format, dims=['lats','lons']) + + # Add additional info if the source array is a DataArray + if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): + lats.attrs = self.lats.attrs.copy() + lons.attrs = self.lons.attrs.copy() + + # Return the extended SwathDefinition + return SwathDefinition(lons, lats) + + def reduce(self, x=0, y=0): + """Reduce the swath definition along x (along-track) and y (across-track) dimensions. + By default, it does not reduce on any direction. + To reduce of n pixel on both sides of the across-track direction, call swath_def.reduce(x=0, y=2). + """ + # Check input validity (ensure reduced area is at least 2x2) + height = self.lats.shape[0] + width = self.lats.shape[1] + x = int(x) + y = int(y) + if x < 0 or y < 0: + raise ValueError('x and y arguments must be positive integers.') + if x >= np.floor(width/2): + max_x = int(np.floor(width/2)) - 1 + raise ValueError("You can at maximum reduce the along-track direction (x) of SwathDef by {} pixels on each side.".format(max_x)) + if y >= np.floor(height/2): + max_y = int(np.floor(height/2)) - 1 + raise ValueError("You can at maximum reduce the across-track direction (y) of SwathDef by {} pixels on each side.".format(max_y)) + + # Return SwathDefinition if nothing to reduce + if x==0 and y==0: + return self + + # Return the reduced SwathDefinition + lats = self.lats[slice(0+y,height-y), slice(0+x,width-x)] + lons = self.lons[slice(0+y,height-y), slice(0+x,width-x)] return SwathDefinition(lons, lats) def __hash__(self): @@ -1426,7 +1697,91 @@ def aggregate(self, **dims): width = int(self.width / dims.get('x', 1)) height = int(self.height / dims.get('y', 1)) return self.copy(height=height, width=width) - + + def areadef_upsample(self, x=1, y=1): + """Return an upsampled version of the area.""" + width = int(self.width * x) + height = int(self.height * y) + return self.copy(height=height, width=width) + + def areadef_extend(self, x=0, y=0): + """Extend AreaDef by x/y pixels on each side.""" + if self.is_geostationary: + raise NotImplementedError("'extend' method is not implemented for GEO AreaDefinition.") + # Check input validity + x = int(x) + y = int(y) + if x < 0 or y < 0: + raise ValueError('x and y arguments must be positive integers.') + + # Retrieve pixel and area info + new_width = self.width + 2*x + new_height = self.height + 2*y + pixel_size_x = self.pixel_size_x + pixel_size_y = self.pixel_size_y + # Extend area_extent (lower_left_x, lower_left_y, upper_right_x, upper_right_y) + area_extent = self._area_extent + new_area_extent = list(area_extent) + new_area_extent[0] = new_area_extent[0] - pixel_size_x*x + new_area_extent[1] = new_area_extent[1] - pixel_size_y*y + new_area_extent[2] = new_area_extent[2] + pixel_size_x*x + new_area_extent[3] = new_area_extent[3] + pixel_size_y*y + # Define new AreaDefinition + projection = self.crs_wkt + area_def = AreaDefinition(self.area_id, self.description, self.proj_id, + projection=projection, + width=new_width, + height=new_height, + area_extent=new_area_extent, + rotation=self.rotation, + nprocs=self.nprocs, + dtype=self.dtype) + + return area_def + + def areadef_reduce(self, x=0, y=0): + """Reduce AreaDef by x/y pixels on each side.""" + if self.is_geostationary: + raise 
NotImplementedError("'reduce' method is not implemented for GEO AreaDefinition.") + # Check input validity (ensure reduced area is at least 2x2) + width = self.width + height = self.height + x = int(x) + y = int(y) + if x < 0 or y < 0: + raise ValueError('x and y arguments must be positive integers.') + if x >= np.floor(width/2): + max_x = int(np.floor(width/2)) - 1 + raise ValueError("You can at maximum reduce width (x) of AreaDef by {} pixels on each side.".format(max_x)) + if y >= np.floor(height/2): + max_y = int(np.floor(height/2)) - 1 + raise ValueError("You can at maximum reduce height (y) of AreaDef by {} pixels on each side.".format(max_y)) + + # Retrieve pixel and area info + new_width = self.width + 2*x + new_height = self.height + 2*y + pixel_size_x = self.pixel_size_x + pixel_size_y = self.pixel_size_y + area_extent = self._area_extent + # Extend area_extent (lower_left_x, lower_left_y, upper_right_x, upper_right_y) + new_area_extent = list(area_extent) + new_area_extent[0] = new_area_extent[0] + pixel_size_x*x + new_area_extent[1] = new_area_extent[1] + pixel_size_y*y + new_area_extent[2] = new_area_extent[2] - pixel_size_x*x + new_area_extent[3] = new_area_extent[3] - pixel_size_y*y + # Define new AreaDefinition + projection = self.crs_wkt + area_def = AreaDefinition(self.area_id, self.description, self.proj_id, + projection=projection, + width=new_width, + height=new_height, + area_extent=new_area_extent, + rotation=self.rotation, + nprocs=self.nprocs, + dtype=self.dtype) + + return area_def + @property def resolution(self): """Return area resolution in X and Y direction.""" @@ -2770,6 +3125,124 @@ def update_hash(self, the_hash=None): the_hash = areadef.update_hash(the_hash) return the_hash +def _convert_2D_array(arr, to, dims=None): + """ + Convert a 2D array to a specific format. + Useful to return swath lons, lats in the same original format after processing. + + Parameters + ---------- + arr : (np.ndarray, da.Array, xr.DataArray) + The 2D array to be converted to another array format. + to : TYPE + The desired array output format. + Accepted formats are: ['Numpy','Dask', 'DataArray_Numpy','DataArray_Dask'] + dims : tuple, optional + Optional argument for the specification of DataArray dimension names + if input array is Numpy or Dask. + Provide a tuple with (y_dimname, x_dimname). + The default is None --> (dim_0, dim_1) + + Returns + ------- + dst_arr : (np.ndarray, da.Array, xr.DataArray) + The converted 2D array. + src_format: str + The source format of the 2D array. 
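+
+    Example (illustrative)::
+
+        arr = np.zeros((10, 20))
+        dst, src_format = _convert_2D_array(arr, to='DataArray_Dask', dims=('y', 'x'))
+        # dst is an xr.DataArray wrapping a dask array and src_format == 'numpy';
+        # the result of later processing can be converted back with
+        # _convert_2D_array(result, to=src_format)[0]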
+ + """ + import numpy as np + import dask.array as da + import xarray as xr + # Checks + valid_format = ['Numpy','Dask', 'DataArray_Numpy','DataArray_Dask'] + if not isinstance(to, str): + raise TypeError("'to' must be a string indicating the conversion array format.") + if not np.isin(to.lower(), np.char.lower(valid_format)): + raise ValueError("Valid conversion array formats are {}".format(valid_format)) + if not isinstance(arr, (np.ndarray, da.Array, xr.DataArray)): + raise TypeError("The provided array must be either a np.ndarray, a dask.Array or a xr.DataArray.") + # Numpy + if isinstance(arr, np.ndarray): + if to.lower() == 'numpy': + dst_arr = arr + elif to.lower() == 'dask': + dst_arr = da.from_array(arr) + elif to.lower() == 'dataarray_numpy': + dst_arr = xr.DataArray(arr, dims=dims) + elif to.lower() == 'dataarray_dask': + dst_arr = xr.DataArray(da.from_array(arr), dims=dims) + else: + raise NotImplementedError + return dst_arr, 'numpy' + # Dask + elif isinstance(arr, da.Array): + if to.lower() == 'numpy': + dst_arr = arr.compute() + elif to.lower() == 'dask': + dst_arr = arr + elif to.lower() == 'dataarray_numpy': + dst_arr = xr.DataArray(arr.compute() , dims=dims) + elif to.lower() == 'dataarray_dask': + dst_arr = xr.DataArray(arr, dims=dims) + else: + raise NotImplementedError + return dst_arr, 'dask' + + # DataArray_Numpy + elif isinstance(arr, xr.DataArray) and isinstance(arr.data, np.ndarray): + if to.lower() == 'numpy': + dst_arr = arr.data + elif to.lower() == 'dask': + dst_arr = da.from_array(arr.data) + elif to.lower() == 'dataarray_numpy': + dst_arr = arr + elif to.lower() == 'dataarray_dask': + dst_arr = xr.DataArray(da.from_array(arr.data), dims=dims) + else: + raise NotImplementedError + return dst_arr, 'DataArray_Numpy' + + # DataArray_Dask + elif isinstance(arr, xr.DataArray) and isinstance(arr.data, da.Array): + if to.lower() == 'numpy': + dst_arr = arr.data.compute() + elif to.lower() == 'dask': + dst_arr = arr.data + elif to.lower() == 'dataarray_numpy': + dst_arr = arr.compute() + elif to.lower() == 'dataarray_dask': + dst_arr = arr + else: + raise NotImplementedError + return dst_arr, 'DataArray_Dask' + + else: + raise NotImplementedError + +def _get_extended_lonlats(lon_start, lat_start, lon_end, lat_end, npts, transpose=True): + """Utils employed by SwathDefinition.extend. + It extrapolate npts following the forward azimuth with an interdistance + equal to the distance between the starting point and the end point. + """ + import pyproj + geod = pyproj.Geod(ellps='sphere') # TODO: sphere or WGS84? 
+ # geod = pyproj.Geod(ellps='WGS84') # sphere + az12_arr, _, dist_arr = geod.inv(lon_start, lat_start, lon_end, lat_end) + list_lat = [] + list_lon = [] + for lon, lat, az12, dist in zip(lon_end, lat_end, az12_arr, dist_arr): + points = geod.fwd_intermediate(lon, lat, az12, del_s=dist, npts=npts, + out_lons=None, out_lats=None, radians=False) + list_lat.append(points.lats) + list_lon.append(points.lons) + + new_lats = np.stack(list_lat) + new_lons = np.stack(list_lon) + if transpose: + new_lats = new_lats.T + new_lons = new_lons.T + return new_lons, new_lats def _get_slice(segments, shape): """Segment a 1D or 2D array.""" From a7c02357a34d59277edc217d1bf9507ec8013ab2 Mon Sep 17 00:00:00 2001 From: ghiggi Date: Tue, 18 Jan 2022 15:40:16 +0100 Subject: [PATCH 02/12] Add tests --- pyresample/test/test_geometry.py | 259 +++++++++++++++++++++++++++++-- 1 file changed, 249 insertions(+), 10 deletions(-) diff --git a/pyresample/test/test_geometry.py b/pyresample/test/test_geometry.py index b3d0b9202..bb6e96f22 100644 --- a/pyresample/test/test_geometry.py +++ b/pyresample/test/test_geometry.py @@ -37,7 +37,6 @@ ) from pyresample.test.utils import catch_warnings - class Test(unittest.TestCase): """Unit testing the geometry and geo_filter modules.""" @@ -1877,22 +1876,189 @@ def test_aggregation(self): import dask.array as da import numpy as np import xarray as xr + from pyresample.geometry import SwathDefinition window_size = 2 resolution = 3 lats = np.array([[0, 0, 0, 0], [1, 1, 1, 1.0]]) lons = np.array([[178.5, 179.5, -179.5, -178.5], [178.5, 179.5, -179.5, -178.5]]) - xlats = xr.DataArray(da.from_array(lats, chunks=2), dims=['y', 'x'], + lats_dask = da.from_array(lats, chunks=2) + lons_dask = da.from_array(lons, chunks=2) + lats_xr = xr.DataArray(lats, dims=['y', 'x'], + attrs={'resolution': resolution}) + lons_xr = xr.DataArray(lons, dims=['y', 'x'], attrs={'resolution': resolution}) - xlons = xr.DataArray(da.from_array(lons, chunks=2), dims=['y', 'x'], + lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x'], attrs={'resolution': resolution}) + lons_xr_dask = xr.DataArray(lons_dask, dims=['y', 'x'], + attrs={'resolution': resolution}) + sd_np = SwathDefinition(lons, lats) + sd_xr = SwathDefinition(lons_xr, lats_xr) + sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) + + res_np = sd_np.aggregate(y=window_size, x=window_size) + res_xr = sd_xr.aggregate(y=window_size, x=window_size) + res_xr_dask = sd_xr_dask.aggregate(y=window_size, x=window_size) + + assert isinstance(res_np.lons, np.ndarray) + assert isinstance(res_xr.lats, xr.DataArray) + assert isinstance(res_xr_dask.lats, xr.DataArray) + assert isinstance(res_xr.lats.data, np.ndarray) + assert isinstance(res_xr_dask.lats.data, da.Array) + + np.testing.assert_allclose(res_np.lons, [[179, -179]]) + np.testing.assert_allclose(res_np.lats, [[0.5, 0.5]], atol=2e-5) + np.testing.assert_allclose(res_xr.lons.data, res_np.lons) + np.testing.assert_allclose(res_xr.lats.data, res_np.lats) + np.testing.assert_allclose(res_xr_dask.lons.values, res_np.lons) + np.testing.assert_allclose(res_xr_dask.lats.values, res_np.lats) + + self.assertAlmostEqual(res_xr.lons.resolution, resolution * window_size) + self.assertAlmostEqual(res_xr.lats.resolution, resolution * window_size) + + def test_upsampling(self): + """Test upsampling on SwathDefinitions.""" + import dask.array as da + import numpy as np + import xarray as xr from pyresample.geometry import SwathDefinition - sd = SwathDefinition(xlons, xlats) - res = sd.aggregate(y=window_size, 
x=window_size) - np.testing.assert_allclose(res.lons, [[179, -179]]) - np.testing.assert_allclose(res.lats, [[0.5, 0.5]], atol=2e-5) - self.assertAlmostEqual(res.lons.resolution, window_size * resolution) - self.assertAlmostEqual(res.lats.resolution, window_size * resolution) - + window_size = 2 + resolution = 4 + + lons = np.array([5.0,9.0]) + lats = np.array([6.0,4.0]) + lons, lats = np.meshgrid(lons, lats) + + lats_dask = da.from_array(lats, chunks=2) + lons_dask = da.from_array(lons, chunks=2) + lats_xr = xr.DataArray(lats, dims=['y', 'x'], + attrs={'resolution': resolution}) + lons_xr = xr.DataArray(lons, dims=['y', 'x'], + attrs={'resolution': resolution}) + lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x'], + attrs={'resolution': resolution}) + lons_xr_dask = xr.DataArray(lons_dask, dims=['y', 'x'], + attrs={'resolution': resolution}) + sd_np = SwathDefinition(lons, lats) + sd_xr = SwathDefinition(lons_xr, lats_xr) + sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) + + res_np = sd_np.upsample(y=window_size, x=window_size) + res_xr = sd_xr.upsample(y=window_size, x=window_size) + res_xr_dask = sd_xr_dask.upsample(y=window_size, x=window_size) + + assert isinstance(res_np.lons, np.ndarray) + assert isinstance(res_xr.lats, xr.DataArray) + assert isinstance(res_xr_dask.lats, xr.DataArray) + assert isinstance(res_xr.lats.data, np.ndarray) + assert isinstance(res_xr_dask.lats.data, da.Array) + + np.testing.assert_allclose(res_np.lons[0,:], [4,6,8,10], atol=1e-2) + np.testing.assert_allclose(res_np.lats[:,1], [6.5, 5.5, 4.5, 3.5], atol=1e-2) + np.testing.assert_allclose(res_xr.lons.data, res_np.lons) + np.testing.assert_allclose(res_xr.lats.data, res_np.lats) + np.testing.assert_allclose(res_xr_dask.lons.values, res_np.lons) + np.testing.assert_allclose(res_xr_dask.lats.values, res_np.lats) + + self.assertAlmostEqual(res_xr.lons.resolution, resolution / window_size) + self.assertAlmostEqual(res_xr.lats.resolution, resolution / window_size) + + def test_extend(self): + """Test extend on SwathDefinitions.""" + import dask.array as da + import numpy as np + import xarray as xr + from pyresample.geometry import SwathDefinition + x = 2 + y = 2 + resolution = 4 + + lons = np.arange(-179.5, -178.5, 0.5) + lats = np.arange(-89.5, -88.5, 0.5) + lons, lats = np.meshgrid(lons, lats) + + lats_dask = da.from_array(lats, chunks=2) + lons_dask = da.from_array(lons, chunks=2) + lats_xr = xr.DataArray(lats, dims=['y', 'x'], + attrs={'resolution': resolution}) + lons_xr = xr.DataArray(lons, dims=['y', 'x'], + attrs={'resolution': resolution}) + lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x'], + attrs={'resolution': resolution}) + lons_xr_dask = xr.DataArray(lons_dask, dims=['y', 'x'], + attrs={'resolution': resolution}) + sd_np = SwathDefinition(lons, lats) + sd_xr = SwathDefinition(lons_xr, lats_xr) + sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) + + res_np = sd_np.extend(y=y, x=x) + res_xr = sd_xr.extend(y=y, x=x) + res_xr_dask = sd_xr_dask.extend(y=y, x=x) + + assert isinstance(res_np.lons, np.ndarray) + assert isinstance(res_xr.lats, xr.DataArray) + assert isinstance(res_xr_dask.lats, xr.DataArray) + assert isinstance(res_xr.lats.data, np.ndarray) + assert isinstance(res_xr_dask.lats.data, da.Array) + + np.testing.assert_allclose(res_np.lons[2, 0:3], [179.5, 180, -179.5], atol=1e-4) + np.testing.assert_allclose(res_np.lons[0, :], [-0.5, 0, 0.5, 1, 1.5, 2], atol=1e-4) + np.testing.assert_allclose(res_np.lats[:, 0], [-89.5, -90.0, -89.5, -89, -88.5, -88.0], atol=1e-3) + 
np.testing.assert_allclose(res_xr.lons.data, res_np.lons) + np.testing.assert_allclose(res_xr.lats.data, res_np.lats) + np.testing.assert_allclose(res_xr_dask.lons.values, res_np.lons) + np.testing.assert_allclose(res_xr_dask.lats.values, res_np.lats) + + self.assertAlmostEqual(res_xr.lons.resolution, resolution) + self.assertAlmostEqual(res_xr.lats.resolution, resolution) + + def test_reduce(self): + """Test reduce on SwathDefinitions.""" + import dask.array as da + import numpy as np + import xarray as xr + from pyresample.geometry import SwathDefinition + x = 1 + y = 0 + resolution = 4 + + lons = np.arange(-179.5, -177.5, 0.5) + lats = np.arange(-89.5, -88.0, 0.5) + lons, lats = np.meshgrid(lons, lats) + + lats_dask = da.from_array(lats, chunks=2) + lons_dask = da.from_array(lons, chunks=2) + lats_xr = xr.DataArray(lats, dims=['y', 'x'], + attrs={'resolution': resolution}) + lons_xr = xr.DataArray(lons, dims=['y', 'x'], + attrs={'resolution': resolution}) + lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x'], + attrs={'resolution': resolution}) + lons_xr_dask = xr.DataArray(lons_dask, dims=['y', 'x'], + attrs={'resolution': resolution}) + sd_np = SwathDefinition(lons, lats) + sd_xr = SwathDefinition(lons_xr, lats_xr) + sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) + + res_np = sd_np.reduce(y=y, x=x) + res_xr = sd_xr.reduce(y=y, x=x) + res_xr_dask = sd_xr_dask.reduce(y=y, x=x) + + assert isinstance(res_np.lons, np.ndarray) + assert isinstance(res_xr.lats, xr.DataArray) + assert isinstance(res_xr_dask.lats, xr.DataArray) + assert isinstance(res_xr.lats.data, np.ndarray) + assert isinstance(res_xr_dask.lats.data, da.Array) + + np.testing.assert_allclose(res_np.lons[:, 0], lons[:,x] ) + np.testing.assert_allclose(res_np.lons[:, -1], lons[:,-x-1]) + np.testing.assert_allclose(res_xr.lons.data, res_np.lons) + np.testing.assert_allclose(res_xr.lats.data, res_np.lats) + np.testing.assert_allclose(res_xr_dask.lons.values, res_np.lons) + np.testing.assert_allclose(res_xr_dask.lats.values, res_np.lats) + + self.assertAlmostEqual(res_xr.lons.resolution, resolution) + self.assertAlmostEqual(res_xr.lats.resolution, resolution) + def test_striding(self): """Test striding.""" import dask.array as da @@ -2561,7 +2727,79 @@ def test_aggregate(self): np.testing.assert_allclose(res.area_extent, area.area_extent) self.assertEqual(res.shape[0], area.shape[0] / 2) self.assertEqual(res.shape[1], area.shape[1] / 4) + + def test_upsample(self): + """Test upsample of AreaDefinitions.""" + area = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD', + {'a': '6378144.0', + 'b': '6356759.0', + 'lat_0': '50.00', + 'lat_ts': '50.00', + 'lon_0': '8.00', + 'proj': 'stere'}, + 800, + 800, + [-1370912.72, + -909968.64000000001, + 1029087.28, + 1490031.3600000001]) + res = area.upsample(x=4, y=2) + self.assertDictEqual(res.proj_dict, area.proj_dict) + np.testing.assert_allclose(res.area_extent, area.area_extent) + self.assertEqual(res.shape[0], area.shape[0] * 2) + self.assertEqual(res.shape[1], area.shape[1] * 4) + + def test_reduce(self): + """Test reduce of AreaDefinitions.""" + from pyresample import geometry + area = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD', + {'a': '6378144.0', + 'b': '6356759.0', + 'lat_0': '50.00', + 'lat_ts': '50.00', + 'lon_0': '8.00', + 'proj': 'stere'}, + 800, + 800, + [-1370912.72, + -909968.64000000001, + 1029087.28, + 1490031.3600000001]) + x = 8 + y = 2 + res = area.reduce(x=x, y=y) + self.assertDictEqual(res.proj_dict, area.proj_dict) + 
self.assertEqual(res.shape[0], area.shape[0] - y* 2) + self.assertEqual(res.shape[1], area.shape[1] - x* 2) + + res = area.reduce(x=0, y=0) + np.testing.assert_allclose(res.area_extent, area.area_extent) + def test_extend(self): + """Test extend of AreaDefinitions.""" + from pyresample import geometry + area = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD', + {'a': '6378144.0', + 'b': '6356759.0', + 'lat_0': '50.00', + 'lat_ts': '50.00', + 'lon_0': '8.00', + 'proj': 'stere'}, + 800, + 800, + [-1370912.72, + -909968.64000000001, + 1029087.28, + 1490031.3600000001]) + x = 8 + y = 2 + res = area.extend(x=x, y=y) + self.assertDictEqual(res.proj_dict, area.proj_dict) + self.assertEqual(res.shape[0], area.shape[0] + y* 2) + self.assertEqual(res.shape[1], area.shape[1] + x* 2) + + res = area.extend(x=0, y=0) + np.testing.assert_allclose(res.area_extent, area.area_extent) def test_enclose_areas(): """Test enclosing areas.""" @@ -2851,3 +3089,4 @@ def _is_clockwise(lons, lats): edge_sum += xdiff * ysum prev_point = point return edge_sum > 0 + From 466c5f3d3c7d95219d89a5c2f56d95ed237fc376 Mon Sep 17 00:00:00 2001 From: ghiggi Date: Tue, 18 Jan 2022 15:49:50 +0100 Subject: [PATCH 03/12] Fix small bugs --- pyresample/geometry.py | 302 ++++++++++++++++++++--------------------- 1 file changed, 150 insertions(+), 152 deletions(-) diff --git a/pyresample/geometry.py b/pyresample/geometry.py index 61a543d4e..7f3068713 100644 --- a/pyresample/geometry.py +++ b/pyresample/geometry.py @@ -715,16 +715,17 @@ def copy(self): def _do_transform(src, dst, lons, lats, alt): """Run pyproj.transform and stack the results.""" x, y, z = transform(src, dst, lons, lats, alt) - return np.dstack((x, y, z)) - - # TODO: this is more efficient than pyproj.transform. - # With pyproj > 3.1 it became thread safe: https://pyproj4.github.io/pyproj/stable/advanced_examples.html#multithreading + return np.dstack((x, y, z)) + + # TODO: this is more efficient than pyproj.transform. 
+ # With pyproj > 3.1 it became thread safe: + # https://pyproj4.github.io/pyproj/stable/advanced_examples.html#multithreading # @staticmethod # def swath_do_transform(src, dst, lons, lats, alt): # """Run pyproj Transformer and stack the results.""" # from pyproj import Transformer # transformer = Transformer.from_crs(src.crs, dst.crs) - # x, y, z = transformer.transform(lons, lats, alt, radians=False) + # x, y, z = transformer.transform(lons, lats, alt, radians=False) # return np.dstack((x, y, z)) def aggregate(self, x=1, y=1, **kwargs): @@ -741,23 +742,23 @@ def aggregate(self, x=1, y=1, **kwargs): import dask.array as da import pyproj - # Check input validity + # Check input validity x = int(x) y = int(y) if x < 1 or y < 1: raise ValueError('x and y arguments must be positive integers larger or equal to 1.') - # Return SwathDefinition if nothing to aggregate + # Return SwathDefinition if nothing to aggregate if x==1 and y==1: return self - # Define geodetic and geocentric projection + # Define geodetic and geocentric projection geocent = pyproj.Proj(proj='geocent') latlong = pyproj.Proj(proj='latlong') # Get xr.DataArray with dask array - src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['lats','lons']) - src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['lats','lons']) + src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['y','x']) + src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['y','x']) # Conversion to Geocentric Cartesian (x,y,z) CRS res = da.map_blocks(self._do_transform, latlong, geocent, @@ -766,12 +767,12 @@ def aggregate(self, x=1, y=1, **kwargs): da.zeros_like(src_lons), # altitude new_axis=[2], chunks=(src_lons.chunks[0], src_lons.chunks[1], 3)) - res = xr.DataArray(res, dims=['y', 'x', 'coord'], coords=src_lons.coords) + res = xr.DataArray(res, dims=['y', 'x', 'xyz'], coords=src_lons.coords) - # Aggregating + # Aggregating res = res.coarsen(x=x, y=y, **kwargs).mean() - # Back-conversion to geographic CRS + # Back-conversion to geographic CRS lonlatalt = da.map_blocks(self._do_transform, geocent, latlong, res[:, :, 0].data, # x res[:, :, 1].data, # y @@ -780,14 +781,14 @@ def aggregate(self, x=1, y=1, **kwargs): chunks=res.data.chunks) # Back-conversion array as input format - lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['lats','lons']) - lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['lats','lons']) + lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['y','x']) + lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['y','x']) # Add additional info if the source array is a DataArray if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): - lats.coords = res.coords + lats = lats.assign_coords(res.coords) lats.attrs = self.lats.attrs.copy() - lons.coords = res.coords + lons = lons.assign_coords(res.coords) lons.attrs = self.lons.attrs.copy() try: resolution = lons.attrs['resolution'] * ((x + y) / 2) @@ -882,8 +883,8 @@ def _upsample_centroid(centroid, x_factor=1, y_factor=1): latlong = pyproj.Proj(proj='latlong') # Get xr.DataArray with dask array - src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['lats','lons']) - src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['lats','lons']) + src_lats, src_lats_format = _convert_2D_array(self.lats, 
to='DataArray_Dask', dims=['y','x']) + src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['y','x']) # Conversion to Geocentric Cartesian (x,y,z) CRS res = da.map_blocks(self._do_transform, latlong, geocent, @@ -917,17 +918,15 @@ def _upsample_centroid(centroid, x_factor=1, y_factor=1): chunks=new_centroids.data.chunks) # Back-conversion array as input format - lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['lats','lons']) - lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['lats','lons']) + lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['y','x']) + lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['y','x']) # Add additional info if the source array is a DataArray if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): - lats.coords = res.coords lats.attrs = self.lats.attrs.copy() - lons.coords = res.coords lons.attrs = self.lons.attrs.copy() try: - resolution = lons.attrs['resolution'] * ((x + y) / 2) + resolution = lons.attrs['resolution'] / ((x + y) / 2) lons.attrs['resolution'] = resolution lats.attrs['resolution'] = resolution except KeyError: @@ -953,8 +952,8 @@ def extend(self, x=0, y=0): return self # Get lats/lons numpy arrays - src_lats, src_lats_format = _convert_2D_array(self.lats, to='numpy', dims=['lats','lons']) - src_lons, src_lons_format = _convert_2D_array(self.lons, to='numpy', dims=['lats','lons']) + src_lats, src_lats_format = _convert_2D_array(self.lats, to='numpy', dims=['y','x']) + src_lons, src_lons_format = _convert_2D_array(self.lons, to='numpy', dims=['y','x']) dst_lats = src_lats dst_lons = src_lons @@ -982,8 +981,8 @@ def extend(self, x=0, y=0): dst_lons = np.concatenate((extended_side3_lonlats[0][:, ::-1], dst_lons), axis=1) # Back-conversion array as input format - lons, _ = _convert_2D_array(dst_lons, to=src_lons_format, dims=['lats','lons']) - lats, _ = _convert_2D_array(dst_lats, to=src_lats_format, dims=['lats','lons']) + lons, _ = _convert_2D_array(dst_lons, to=src_lons_format, dims=['y','x']) + lats, _ = _convert_2D_array(dst_lats, to=src_lats_format, dims=['y','x']) # Add additional info if the source array is a DataArray if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): @@ -1162,6 +1161,124 @@ def compute_optimal_bb_area(self, proj_dict=None): lons, lats = self.get_edge_lonlats() return area.freeze((lons, lats), shape=(height, width)) +def _convert_2D_array(arr, to, dims=None): + """ + Convert a 2D array to a specific format. + Useful to return swath lons, lats in the same original format after processing. + + Parameters + ---------- + arr : (np.ndarray, da.Array, xr.DataArray) + The 2D array to be converted to another array format. + to : TYPE + The desired array output format. + Accepted formats are: ['Numpy','Dask', 'DataArray_Numpy','DataArray_Dask'] + dims : tuple, optional + Optional argument for the specification of DataArray dimension names + if input array is Numpy or Dask. + Provide a tuple with (y_dimname, x_dimname). + The default is None --> (dim_0, dim_1) + + Returns + ------- + dst_arr : (np.ndarray, da.Array, xr.DataArray) + The converted 2D array. + src_format: str + The source format of the 2D array. 
+ + """ + import numpy as np + import dask.array as da + import xarray as xr + # Checks + valid_format = ['Numpy','Dask', 'DataArray_Numpy','DataArray_Dask'] + if not isinstance(to, str): + raise TypeError("'to' must be a string indicating the conversion array format.") + if not np.isin(to.lower(), np.char.lower(valid_format)): + raise ValueError("Valid conversion array formats are {}".format(valid_format)) + if not isinstance(arr, (np.ndarray, da.Array, xr.DataArray)): + raise TypeError("The provided array must be either a np.ndarray, a dask.Array or a xr.DataArray.") + # Numpy + if isinstance(arr, np.ndarray): + if to.lower() == 'numpy': + dst_arr = arr + elif to.lower() == 'dask': + dst_arr = da.from_array(arr) + elif to.lower() == 'dataarray_numpy': + dst_arr = xr.DataArray(arr, dims=dims) + elif to.lower() == 'dataarray_dask': + dst_arr = xr.DataArray(da.from_array(arr), dims=dims) + else: + raise NotImplementedError + return dst_arr, 'numpy' + # Dask + elif isinstance(arr, da.Array): + if to.lower() == 'numpy': + dst_arr = arr.compute() + elif to.lower() == 'dask': + dst_arr = arr + elif to.lower() == 'dataarray_numpy': + dst_arr = xr.DataArray(arr.compute() , dims=dims) + elif to.lower() == 'dataarray_dask': + dst_arr = xr.DataArray(arr, dims=dims) + else: + raise NotImplementedError + return dst_arr, 'dask' + + # DataArray_Numpy + elif isinstance(arr, xr.DataArray) and isinstance(arr.data, np.ndarray): + if to.lower() == 'numpy': + dst_arr = arr.data + elif to.lower() == 'dask': + dst_arr = da.from_array(arr.data) + elif to.lower() == 'dataarray_numpy': + dst_arr = arr + elif to.lower() == 'dataarray_dask': + dst_arr = xr.DataArray(da.from_array(arr.data), dims=dims) + else: + raise NotImplementedError + return dst_arr, 'DataArray_Numpy' + + # DataArray_Dask + elif isinstance(arr, xr.DataArray) and isinstance(arr.data, da.Array): + if to.lower() == 'numpy': + dst_arr = arr.data.compute() + elif to.lower() == 'dask': + dst_arr = arr.data + elif to.lower() == 'dataarray_numpy': + dst_arr = arr.compute() + elif to.lower() == 'dataarray_dask': + dst_arr = arr + else: + raise NotImplementedError + return dst_arr, 'DataArray_Dask' + + else: + raise NotImplementedError + +def _get_extended_lonlats(lon_start, lat_start, lon_end, lat_end, npts, transpose=True): + """Utils employed by SwathDefinition.extend. + It extrapolate npts following the forward azimuth with an interdistance + equal to the distance between the starting point and the end point. + """ + import pyproj + geod = pyproj.Geod(ellps='sphere') # TODO: sphere or WGS84? + # geod = pyproj.Geod(ellps='WGS84') # sphere + az12_arr, _, dist_arr = geod.inv(lon_start, lat_start, lon_end, lat_end) + list_lat = [] + list_lon = [] + for lon, lat, az12, dist in zip(lon_end, lat_end, az12_arr, dist_arr): + points = geod.fwd_intermediate(lon, lat, az12, del_s=dist, npts=npts, + out_lons=None, out_lats=None, radians=False) + list_lat.append(points.lats) + list_lon.append(points.lons) + + new_lats = np.stack(list_lat) + new_lons = np.stack(list_lon) + if transpose: + new_lats = new_lats.T + new_lons = new_lons.T + return new_lons, new_lats class DynamicAreaDefinition(object): """An AreaDefintion containing just a subset of the needed parameters. 
@@ -1698,13 +1815,13 @@ def aggregate(self, **dims): height = int(self.height / dims.get('y', 1)) return self.copy(height=height, width=width) - def areadef_upsample(self, x=1, y=1): + def upsample(self, x=1, y=1): """Return an upsampled version of the area.""" width = int(self.width * x) height = int(self.height * y) return self.copy(height=height, width=width) - def areadef_extend(self, x=0, y=0): + def extend(self, x=0, y=0): """Extend AreaDef by x/y pixels on each side.""" if self.is_geostationary: raise NotImplementedError("'extend' method is not implemented for GEO AreaDefinition.") @@ -1739,7 +1856,7 @@ def areadef_extend(self, x=0, y=0): return area_def - def areadef_reduce(self, x=0, y=0): + def reduce(self, x=0, y=0): """Reduce AreaDef by x/y pixels on each side.""" if self.is_geostationary: raise NotImplementedError("'reduce' method is not implemented for GEO AreaDefinition.") @@ -1758,8 +1875,8 @@ def areadef_reduce(self, x=0, y=0): raise ValueError("You can at maximum reduce height (y) of AreaDef by {} pixels on each side.".format(max_y)) # Retrieve pixel and area info - new_width = self.width + 2*x - new_height = self.height + 2*y + new_width = self.width - 2*x + new_height = self.height - 2*y pixel_size_x = self.pixel_size_x pixel_size_y = self.pixel_size_y area_extent = self._area_extent @@ -3125,125 +3242,6 @@ def update_hash(self, the_hash=None): the_hash = areadef.update_hash(the_hash) return the_hash -def _convert_2D_array(arr, to, dims=None): - """ - Convert a 2D array to a specific format. - Useful to return swath lons, lats in the same original format after processing. - - Parameters - ---------- - arr : (np.ndarray, da.Array, xr.DataArray) - The 2D array to be converted to another array format. - to : TYPE - The desired array output format. - Accepted formats are: ['Numpy','Dask', 'DataArray_Numpy','DataArray_Dask'] - dims : tuple, optional - Optional argument for the specification of DataArray dimension names - if input array is Numpy or Dask. - Provide a tuple with (y_dimname, x_dimname). - The default is None --> (dim_0, dim_1) - - Returns - ------- - dst_arr : (np.ndarray, da.Array, xr.DataArray) - The converted 2D array. - src_format: str - The source format of the 2D array. 
- - """ - import numpy as np - import dask.array as da - import xarray as xr - # Checks - valid_format = ['Numpy','Dask', 'DataArray_Numpy','DataArray_Dask'] - if not isinstance(to, str): - raise TypeError("'to' must be a string indicating the conversion array format.") - if not np.isin(to.lower(), np.char.lower(valid_format)): - raise ValueError("Valid conversion array formats are {}".format(valid_format)) - if not isinstance(arr, (np.ndarray, da.Array, xr.DataArray)): - raise TypeError("The provided array must be either a np.ndarray, a dask.Array or a xr.DataArray.") - # Numpy - if isinstance(arr, np.ndarray): - if to.lower() == 'numpy': - dst_arr = arr - elif to.lower() == 'dask': - dst_arr = da.from_array(arr) - elif to.lower() == 'dataarray_numpy': - dst_arr = xr.DataArray(arr, dims=dims) - elif to.lower() == 'dataarray_dask': - dst_arr = xr.DataArray(da.from_array(arr), dims=dims) - else: - raise NotImplementedError - return dst_arr, 'numpy' - # Dask - elif isinstance(arr, da.Array): - if to.lower() == 'numpy': - dst_arr = arr.compute() - elif to.lower() == 'dask': - dst_arr = arr - elif to.lower() == 'dataarray_numpy': - dst_arr = xr.DataArray(arr.compute() , dims=dims) - elif to.lower() == 'dataarray_dask': - dst_arr = xr.DataArray(arr, dims=dims) - else: - raise NotImplementedError - return dst_arr, 'dask' - - # DataArray_Numpy - elif isinstance(arr, xr.DataArray) and isinstance(arr.data, np.ndarray): - if to.lower() == 'numpy': - dst_arr = arr.data - elif to.lower() == 'dask': - dst_arr = da.from_array(arr.data) - elif to.lower() == 'dataarray_numpy': - dst_arr = arr - elif to.lower() == 'dataarray_dask': - dst_arr = xr.DataArray(da.from_array(arr.data), dims=dims) - else: - raise NotImplementedError - return dst_arr, 'DataArray_Numpy' - - # DataArray_Dask - elif isinstance(arr, xr.DataArray) and isinstance(arr.data, da.Array): - if to.lower() == 'numpy': - dst_arr = arr.data.compute() - elif to.lower() == 'dask': - dst_arr = arr.data - elif to.lower() == 'dataarray_numpy': - dst_arr = arr.compute() - elif to.lower() == 'dataarray_dask': - dst_arr = arr - else: - raise NotImplementedError - return dst_arr, 'DataArray_Dask' - - else: - raise NotImplementedError - -def _get_extended_lonlats(lon_start, lat_start, lon_end, lat_end, npts, transpose=True): - """Utils employed by SwathDefinition.extend. - It extrapolate npts following the forward azimuth with an interdistance - equal to the distance between the starting point and the end point. - """ - import pyproj - geod = pyproj.Geod(ellps='sphere') # TODO: sphere or WGS84? - # geod = pyproj.Geod(ellps='WGS84') # sphere - az12_arr, _, dist_arr = geod.inv(lon_start, lat_start, lon_end, lat_end) - list_lat = [] - list_lon = [] - for lon, lat, az12, dist in zip(lon_end, lat_end, az12_arr, dist_arr): - points = geod.fwd_intermediate(lon, lat, az12, del_s=dist, npts=npts, - out_lons=None, out_lats=None, radians=False) - list_lat.append(points.lats) - list_lon.append(points.lons) - - new_lats = np.stack(list_lat) - new_lons = np.stack(list_lon) - if transpose: - new_lats = new_lats.T - new_lons = new_lons.T - return new_lons, new_lats - def _get_slice(segments, shape): """Segment a 1D or 2D array.""" if not (1 <= len(shape) <= 2): From 58d817657ad45c49706a5bc14406a5e33f7d6932 Mon Sep 17 00:00:00 2001 From: ghiggi Date: Tue, 18 Jan 2022 16:08:55 +0100 Subject: [PATCH 04/12] Fix linting errors. 
--- pyresample/geometry.py | 459 ++++++++++++++++--------------- pyresample/test/test_geometry.py | 139 +++++----- 2 files changed, 303 insertions(+), 295 deletions(-) diff --git a/pyresample/geometry.py b/pyresample/geometry.py index 7f3068713..5b79ced9d 100644 --- a/pyresample/geometry.py +++ b/pyresample/geometry.py @@ -716,7 +716,7 @@ def _do_transform(src, dst, lons, lats, alt): """Run pyproj.transform and stack the results.""" x, y, z = transform(src, dst, lons, lats, alt) return np.dstack((x, y, z)) - + # TODO: this is more efficient than pyproj.transform. # With pyproj > 3.1 it became thread safe: # https://pyproj4.github.io/pyproj/stable/advanced_examples.html#multithreading @@ -730,60 +730,60 @@ def _do_transform(src, dst, lons, lats, alt): def aggregate(self, x=1, y=1, **kwargs): """Downsample the swath definition by averaging the coordinates along x and y dimension. - - Builds upon xarray.DataArray.coarsen function. + + Builds upon xarray.DataArray.coarsen function. To downsample of a factor of 2, call swath_def.aggregate(x=2, y=2) - swath_def.aggregate(x=1, y=1) simply returns the current swath_def. + swath_def.aggregate(x=1, y=1) simply returns the current swath_def. By default, it raise a ValueError if the dimension size is not a multiple of the window size. This can be changed by passing boundary="trim" or boundary="pad", but behaviour within pyresample is undefined. See https://xarray.pydata.org/en/stable/generated/xarray.DataArray.coarsen.html for further details. """ - import xarray as xr + import xarray as xr import dask.array as da import pyproj - + # Check input validity x = int(x) y = int(y) - if x < 1 or y < 1: + if x < 1 or y < 1: raise ValueError('x and y arguments must be positive integers larger or equal to 1.') - + # Return SwathDefinition if nothing to aggregate - if x==1 and y==1: - return self - + if x == 1 and y == 1: + return self + # Define geodetic and geocentric projection geocent = pyproj.Proj(proj='geocent') latlong = pyproj.Proj(proj='latlong') - + # Get xr.DataArray with dask array - src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['y','x']) - src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['y','x']) - + src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['y', 'x']) + src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['y', 'x']) + # Conversion to Geocentric Cartesian (x,y,z) CRS res = da.map_blocks(self._do_transform, latlong, geocent, - src_lons.data, + src_lons.data, src_lats.data, - da.zeros_like(src_lons), # altitude + da.zeros_like(src_lons), # altitude new_axis=[2], chunks=(src_lons.chunks[0], src_lons.chunks[1], 3)) res = xr.DataArray(res, dims=['y', 'x', 'xyz'], coords=src_lons.coords) - + # Aggregating res = res.coarsen(x=x, y=y, **kwargs).mean() - + # Back-conversion to geographic CRS lonlatalt = da.map_blocks(self._do_transform, geocent, latlong, - res[:, :, 0].data, # x - res[:, :, 1].data, # y - res[:, :, 2].data, # z + res[:, :, 0].data, # x + res[:, :, 1].data, # y + res[:, :, 2].data, # z new_axis=[2], chunks=res.data.chunks) - + # Back-conversion array as input format - lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['y','x']) - lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['y','x']) - + lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['y', 'x']) + lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, 
dims=['y', 'x']) + # Add additional info if the source array is a DataArray if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): lats = lats.assign_coords(res.coords) @@ -791,20 +791,20 @@ def aggregate(self, x=1, y=1, **kwargs): lons = lons.assign_coords(res.coords) lons.attrs = self.lons.attrs.copy() try: - resolution = lons.attrs['resolution'] * ((x + y) / 2) + resolution = lons.attrs['resolution'] * ((x + y) / 2) lons.attrs['resolution'] = resolution lats.attrs['resolution'] = resolution except KeyError: pass - - # Return the downsampled swath definition + + # Return the downsampled swath definition return SwathDefinition(lons, lats) - + def upsample(self, x=1, y=1): """Upsample the swath definition along x (along-track) and y (cross-track) dimensions. - + To upsample of a factor of 2 (each pixel splitted in 4 pixels), call swath_def.upsample(x=2, y=2). - swath_def.upsample(x=1, y=1) simply returns the current swath_def. + swath_def.upsample(x=1, y=1) simply returns the current swath_def. """ # TODO: An alternative would be to use geotiepoints.geointerpolator.GeoInterpolator # But I have some problem using it, see code snippet in the PR description. @@ -814,53 +814,53 @@ def upsample(self, x=1, y=1): import pyproj from xarray.plot.utils import _infer_interval_breaks # https://github.com/pydata/xarray/blob/main/xarray/plot/utils.py#L784 - + def _upsample_ranges_1D(x, factor=1): ranges2D = np.linspace(x[:-1], x[1:], num=factor, endpoint=False, axis=1) - return np.concatenate((ranges2D.ravel(),[x[-1]])) - + return np.concatenate((ranges2D.ravel(), [x[-1]])) + def upsample_ranges_2D(x, factor=1, axis=0): x = np.array(x) - if x.ndim not in [1,2]: + if x.ndim not in [1, 2]: raise ValueError("Expects 1D or 2D array.") - if not isinstance(axis, int): - raise TypeError("'axis' must be: 0 or 1 integer.") - if axis not in [0, 1]: + if not isinstance(axis, int): + raise TypeError("'axis' must be: 0 or 1 integer.") + if axis not in [0, 1]: raise ValueError("Expects 'axis' 0 or 1") - if not isinstance(factor, int): + if not isinstance(factor, int): raise TypeError("'factor' must be an integer equal or larger than 1.") - if factor < 1: + if factor < 1: raise ValueError("'factor' must be an integer equal or larger than 1.") - + if x.ndim == 1: return _upsample_ranges_1D(x, factor=factor) else: l_ranges = [] - if axis==1: + if axis == 1: for i in range(x.shape[0]): - l_ranges.append(_upsample_ranges_1D(x[i,:], factor=factor)) + l_ranges.append(_upsample_ranges_1D(x[i, :], factor=factor)) return np.vstack(l_ranges) - else: # axis = 0 + else: # axis = 0 for i in range(x.shape[1]): - l_ranges.append(_upsample_ranges_1D(x[:,i], factor=factor)) + l_ranges.append(_upsample_ranges_1D(x[:, i], factor=factor)) return np.vstack(l_ranges).transpose() - + def _upsample_corners(corners, x_factor=1, y_factor=1): new_breaks_xx = upsample_ranges_2D(corners, factor=x_factor, axis=1) new_corners = upsample_ranges_2D(new_breaks_xx, factor=y_factor, axis=0) return new_corners - + def _get_corners_from_centroids(centroids): breaks_xx = _infer_interval_breaks(centroids, axis=1) corners = _infer_interval_breaks(breaks_xx, axis=0) return corners - + def _get_centroids_from_corners(corners): centroids = (corners[1:, 1:] + corners[:-1, :-1]) / 2 return centroids - - # TODO: Decide if compute in memory or with dask - def upsample_centroids(centroid_x, centroid_y, centroid_z, x_factor=1, y_factor=1): + + # TODO: Decide if compute in memory or with dask + def upsample_centroids(centroid_x, centroid_y, 
centroid_z, x_factor=1, y_factor=1): corners_x = _get_corners_from_centroids(centroid_x) corners_y = _get_corners_from_centroids(centroid_y) corners_z = _get_corners_from_centroids(centroid_z) @@ -871,130 +871,131 @@ def upsample_centroids(centroid_x, centroid_y, centroid_z, x_factor=1, y_factor= y_new_centroids = _get_centroids_from_corners(y_new_corners) z_new_centroids = _get_centroids_from_corners(z_new_corners) return x_new_centroids, y_new_centroids, z_new_centroids - + def _upsample_centroid(centroid, x_factor=1, y_factor=1): corners = _get_corners_from_centroids(centroid) new_corners = _upsample_corners(corners, x_factor=x_factor, y_factor=y_factor) new_centroids = _get_centroids_from_corners(new_corners) return new_centroids - - # Define geodetic and geocentric projection + + # Define geodetic and geocentric projection geocent = pyproj.Proj(proj='geocent') latlong = pyproj.Proj(proj='latlong') - + # Get xr.DataArray with dask array - src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['y','x']) - src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['y','x']) - + src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['y', 'x']) + src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['y', 'x']) + # Conversion to Geocentric Cartesian (x,y,z) CRS res = da.map_blocks(self._do_transform, latlong, geocent, - src_lons.data, + src_lons.data, src_lats.data, - da.zeros_like(src_lons), # altitude + da.zeros_like(src_lons), # altitude new_axis=[2], chunks=(src_lons.chunks[0], src_lons.chunks[1], 3)) res = xr.DataArray(res, dims=['y', 'x', 'xyz']) - - # Retrieve new centroids + + # Retrieve new centroids # TODO: make it dask compatible using _upsample_centroid_dask [HELP WANTED] # res1 = da.apply_along_axis(_upsample_centroid_dask, - # 2, + # 2, # res.data, - # x, + # x, # y) # res1 = xr.DataArray(res1, dims=['y', 'x', 'coord'], coords=src_lons.coords) - - res = np.stack(upsample_centroids(res[:,:,0].data, - res[:,:,1].data, - res[:,:,2].data, x_factor=x, y_factor=y), axis=2) - new_centroids = xr.DataArray(da.from_array(res), dims=['y', 'x', 'xyz']) - - # Back-conversion to geographic CRS + + res = np.stack(upsample_centroids(res[:, :, 0].data, + res[:, :, 1].data, + res[:, :, 2].data, x_factor=x, y_factor=y), axis=2) + new_centroids = xr.DataArray(da.from_array(res), dims=['y', 'x', 'xyz']) + + # Back-conversion to geographic CRS lonlatalt = da.map_blocks(self._do_transform, geocent, latlong, - new_centroids[:, :, 0].data, # x - new_centroids[:, :, 1].data, # y - new_centroids[:, :, 2].data, # z + new_centroids[:, :, 0].data, # x + new_centroids[:, :, 1].data, # y + new_centroids[:, :, 2].data, # z new_axis=[2], chunks=new_centroids.data.chunks) - + # Back-conversion array as input format - lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['y','x']) - lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['y','x']) - + lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['y', 'x']) + lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['y', 'x']) + # Add additional info if the source array is a DataArray if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): lats.attrs = self.lats.attrs.copy() lons.attrs = self.lons.attrs.copy() try: - resolution = lons.attrs['resolution'] / ((x + y) / 2) + resolution = lons.attrs['resolution'] / ((x + y) / 2) 
lons.attrs['resolution'] = resolution lats.attrs['resolution'] = resolution except KeyError: pass - - # Return the downsampled swath definition + + # Return the downsampled swath definition return SwathDefinition(lons, lats) - + def extend(self, x=0, y=0): """Extend the swath definition along x (along-track) and y (across-track) dimensions. - By default, it does not extend on any direction. + By default, it does not extend on any direction. To extend of n pixel on both sides of the across-track direction, call swath_def.extend(x=0, y=2). """ - import xarray as xr - # Check input validity + import xarray as xr + # Check input validity x = int(x) y = int(y) - if x < 0 or y < 0: + if x < 0 or y < 0: raise ValueError('x and y arguments must be positive integers.') - - # Return SwathDefinition if nothing to extend - if x==0 and y==0: - return self - + + # Return SwathDefinition if nothing to extend + if x == 0 and y == 0: + return self + # Get lats/lons numpy arrays - src_lats, src_lats_format = _convert_2D_array(self.lats, to='numpy', dims=['y','x']) - src_lons, src_lons_format = _convert_2D_array(self.lons, to='numpy', dims=['y','x']) - + src_lats, src_lats_format = _convert_2D_array(self.lats, to='numpy', dims=['y', 'x']) + src_lons, src_lons_format = _convert_2D_array(self.lons, to='numpy', dims=['y', 'x']) + dst_lats = src_lats dst_lons = src_lons - + # Extend on y direction (side0 and side2) if y > 0: - list_side0 = (src_lons[1,:], src_lats[1,:], src_lons[0,:], src_lats[0,:]) - list_side2 = (src_lons[-2,:], src_lats[-2,:], src_lons[-1,:], src_lats[-1,:]) + list_side0 = (src_lons[1, :], src_lats[1, :], src_lons[0, :], src_lats[0, :]) + list_side2 = (src_lons[-2, :], src_lats[-2, :], src_lons[-1, :], src_lats[-1, :]) extended_side0_lonlats = _get_extended_lonlats(*list_side0, npts=y) extended_side2_lonlats = _get_extended_lonlats(*list_side2, npts=y) - dst_lats = np.concatenate((extended_side0_lonlats[1][::-1,:], dst_lats), axis=0) + dst_lats = np.concatenate((extended_side0_lonlats[1][::-1, :], dst_lats), axis=0) dst_lats = np.concatenate((dst_lats, extended_side2_lonlats[1]), axis=0) - dst_lons = np.concatenate((extended_side0_lonlats[0][::-1,:], dst_lons), axis=0) + dst_lons = np.concatenate((extended_side0_lonlats[0][::-1, :], dst_lons), axis=0) dst_lons = np.concatenate((dst_lons, extended_side2_lonlats[0]), axis=0) - - # Extend on x direction (side1 and side3) - if x > 0: - list_side1 = (dst_lons[:,-2], dst_lats[:,-2], dst_lons[:,-1], dst_lats[:,-1]) - list_side3 = (dst_lons[:, 1], dst_lats[:, 1], dst_lons[:, 0], dst_lats[:, 0]) - extended_side1_lonlats = _get_extended_lonlats(*list_side1, npts=x, transpose=False) - extended_side3_lonlats = _get_extended_lonlats(*list_side3, npts=x, transpose=False) - dst_lats = np.concatenate((dst_lats, extended_side1_lonlats[1]), axis=1) - dst_lats = np.concatenate((extended_side3_lonlats[1][:,::-1], dst_lats), axis=1) - dst_lons = np.concatenate((dst_lons, extended_side1_lonlats[0]), axis=1) - dst_lons = np.concatenate((extended_side3_lonlats[0][:, ::-1], dst_lons), axis=1) - + + # Extend on x direction (side1 and side3) + if x > 0: + list_side1 = (dst_lons[:, -2], dst_lats[:, -2], dst_lons[:, -1], dst_lats[:, -1]) + list_side3 = (dst_lons[:, 1], dst_lats[:, 1], dst_lons[:, 0], dst_lats[:, 0]) + extended_side1_lonlats = _get_extended_lonlats(*list_side1, npts=x, transpose=False) + extended_side3_lonlats = _get_extended_lonlats(*list_side3, npts=x, transpose=False) + dst_lats = np.concatenate((dst_lats, extended_side1_lonlats[1]), axis=1) + dst_lats 
= np.concatenate((extended_side3_lonlats[1][:, ::-1], dst_lats), axis=1) + dst_lons = np.concatenate((dst_lons, extended_side1_lonlats[0]), axis=1) + dst_lons = np.concatenate((extended_side3_lonlats[0][:, ::-1], dst_lons), axis=1) + # Back-conversion array as input format - lons, _ = _convert_2D_array(dst_lons, to=src_lons_format, dims=['y','x']) - lats, _ = _convert_2D_array(dst_lats, to=src_lats_format, dims=['y','x']) - + lons, _ = _convert_2D_array(dst_lons, to=src_lons_format, dims=['y', 'x']) + lats, _ = _convert_2D_array(dst_lats, to=src_lats_format, dims=['y', 'x']) + # Add additional info if the source array is a DataArray if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): lats.attrs = self.lats.attrs.copy() lons.attrs = self.lons.attrs.copy() - - # Return the extended SwathDefinition + + # Return the extended SwathDefinition return SwathDefinition(lons, lats) - + def reduce(self, x=0, y=0): """Reduce the swath definition along x (along-track) and y (across-track) dimensions. - By default, it does not reduce on any direction. + + By default, it does not reduce on any direction. To reduce of n pixel on both sides of the across-track direction, call swath_def.reduce(x=0, y=2). """ # Check input validity (ensure reduced area is at least 2x2) @@ -1002,22 +1003,24 @@ def reduce(self, x=0, y=0): width = self.lats.shape[1] x = int(x) y = int(y) - if x < 0 or y < 0: + if x < 0 or y < 0: raise ValueError('x and y arguments must be positive integers.') - if x >= np.floor(width/2): - max_x = int(np.floor(width/2)) - 1 - raise ValueError("You can at maximum reduce the along-track direction (x) of SwathDef by {} pixels on each side.".format(max_x)) - if y >= np.floor(height/2): - max_y = int(np.floor(height/2)) - 1 - raise ValueError("You can at maximum reduce the across-track direction (y) of SwathDef by {} pixels on each side.".format(max_y)) - - # Return SwathDefinition if nothing to reduce - if x==0 and y==0: - return self - + if x >= np.floor(width / 2): + max_x = int(np.floor(width / 2)) - 1 + raise ValueError("""You can at maximum reduce the along-track direction (x) + of SwathDef by {} pixels on each side.""".format(max_x)) + if y >= np.floor(height / 2): + max_y = int(np.floor(height / 2)) - 1 + raise ValueError("""You can at maximum reduce the across-track direction (y) + of SwathDef by {} pixels on each side.""".format(max_y)) + + # Return SwathDefinition if nothing to reduce + if x == 0 and y == 0: + return self + # Return the reduced SwathDefinition - lats = self.lats[slice(0+y,height-y), slice(0+x,width-x)] - lons = self.lons[slice(0+y,height-y), slice(0+x,width-x)] + lats = self.lats[slice(0 + y, height - y), slice(0 + x, width - x)] + lons = self.lons[slice(0 + y, height - y), slice(0 + x, width - x)] return SwathDefinition(lons, lats) def __hash__(self): @@ -1161,9 +1164,10 @@ def compute_optimal_bb_area(self, proj_dict=None): lons, lats = self.get_edge_lonlats() return area.freeze((lons, lats), shape=(height, width)) -def _convert_2D_array(arr, to, dims=None): + +def _convert_2D_array(arr, to, dims=None): """ - Convert a 2D array to a specific format. + Convert a 2D array to a specific format. Useful to return swath lons, lats in the same original format after processing. Parameters @@ -1174,7 +1178,7 @@ def _convert_2D_array(arr, to, dims=None): The desired array output format. 
Accepted formats are: ['Numpy','Dask', 'DataArray_Numpy','DataArray_Dask'] dims : tuple, optional - Optional argument for the specification of DataArray dimension names + Optional argument for the specification of DataArray dimension names if input array is Numpy or Dask. Provide a tuple with (y_dimname, x_dimname). The default is None --> (dim_0, dim_1) @@ -1183,64 +1187,64 @@ def _convert_2D_array(arr, to, dims=None): ------- dst_arr : (np.ndarray, da.Array, xr.DataArray) The converted 2D array. - src_format: str + src_format: str The source format of the 2D array. """ import numpy as np import dask.array as da import xarray as xr - # Checks - valid_format = ['Numpy','Dask', 'DataArray_Numpy','DataArray_Dask'] - if not isinstance(to, str): + # Checks + valid_format = ['Numpy', 'Dask', 'DataArray_Numpy', 'DataArray_Dask'] + if not isinstance(to, str): raise TypeError("'to' must be a string indicating the conversion array format.") if not np.isin(to.lower(), np.char.lower(valid_format)): raise ValueError("Valid conversion array formats are {}".format(valid_format)) if not isinstance(arr, (np.ndarray, da.Array, xr.DataArray)): raise TypeError("The provided array must be either a np.ndarray, a dask.Array or a xr.DataArray.") - # Numpy - if isinstance(arr, np.ndarray): + # Numpy + if isinstance(arr, np.ndarray): if to.lower() == 'numpy': dst_arr = arr elif to.lower() == 'dask': dst_arr = da.from_array(arr) elif to.lower() == 'dataarray_numpy': - dst_arr = xr.DataArray(arr, dims=dims) + dst_arr = xr.DataArray(arr, dims=dims) elif to.lower() == 'dataarray_dask': - dst_arr = xr.DataArray(da.from_array(arr), dims=dims) + dst_arr = xr.DataArray(da.from_array(arr), dims=dims) else: raise NotImplementedError return dst_arr, 'numpy' - # Dask - elif isinstance(arr, da.Array): + # Dask + elif isinstance(arr, da.Array): if to.lower() == 'numpy': - dst_arr = arr.compute() + dst_arr = arr.compute() elif to.lower() == 'dask': dst_arr = arr elif to.lower() == 'dataarray_numpy': - dst_arr = xr.DataArray(arr.compute() , dims=dims) + dst_arr = xr.DataArray(arr.compute(), dims=dims) elif to.lower() == 'dataarray_dask': - dst_arr = xr.DataArray(arr, dims=dims) + dst_arr = xr.DataArray(arr, dims=dims) else: raise NotImplementedError - return dst_arr, 'dask' - - # DataArray_Numpy - elif isinstance(arr, xr.DataArray) and isinstance(arr.data, np.ndarray): + return dst_arr, 'dask' + + # DataArray_Numpy + elif isinstance(arr, xr.DataArray) and isinstance(arr.data, np.ndarray): if to.lower() == 'numpy': dst_arr = arr.data elif to.lower() == 'dask': - dst_arr = da.from_array(arr.data) + dst_arr = da.from_array(arr.data) elif to.lower() == 'dataarray_numpy': dst_arr = arr elif to.lower() == 'dataarray_dask': - dst_arr = xr.DataArray(da.from_array(arr.data), dims=dims) + dst_arr = xr.DataArray(da.from_array(arr.data), dims=dims) else: raise NotImplementedError - return dst_arr, 'DataArray_Numpy' - - # DataArray_Dask - elif isinstance(arr, xr.DataArray) and isinstance(arr.data, da.Array): + return dst_arr, 'DataArray_Numpy' + + # DataArray_Dask + elif isinstance(arr, xr.DataArray) and isinstance(arr.data, da.Array): if to.lower() == 'numpy': dst_arr = arr.data.compute() elif to.lower() == 'dask': @@ -1251,34 +1255,36 @@ def _convert_2D_array(arr, to, dims=None): dst_arr = arr else: raise NotImplementedError - return dst_arr, 'DataArray_Dask' - - else: + return dst_arr, 'DataArray_Dask' + + else: raise NotImplementedError + def _get_extended_lonlats(lon_start, lat_start, lon_end, lat_end, npts, transpose=True): - """Utils 
employed by SwathDefinition.extend. - It extrapolate npts following the forward azimuth with an interdistance - equal to the distance between the starting point and the end point. - """ - import pyproj - geod = pyproj.Geod(ellps='sphere') # TODO: sphere or WGS84? - # geod = pyproj.Geod(ellps='WGS84') # sphere - az12_arr, _, dist_arr = geod.inv(lon_start, lat_start, lon_end, lat_end) - list_lat = [] - list_lon = [] - for lon, lat, az12, dist in zip(lon_end, lat_end, az12_arr, dist_arr): - points = geod.fwd_intermediate(lon, lat, az12, del_s=dist, npts=npts, - out_lons=None, out_lats=None, radians=False) - list_lat.append(points.lats) - list_lon.append(points.lons) - - new_lats = np.stack(list_lat) - new_lons = np.stack(list_lon) - if transpose: - new_lats = new_lats.T - new_lons = new_lons.T - return new_lons, new_lats + """Utils employed by SwathDefinition.extend. + It extrapolate npts following the forward azimuth with an interdistance + equal to the distance between the starting point and the end point. + """ + import pyproj + geod = pyproj.Geod(ellps='sphere') # TODO: sphere or WGS84? + # geod = pyproj.Geod(ellps='WGS84') # sphere + az12_arr, _, dist_arr = geod.inv(lon_start, lat_start, lon_end, lat_end) + list_lat = [] + list_lon = [] + for lon, lat, az12, dist in zip(lon_end, lat_end, az12_arr, dist_arr): + points = geod.fwd_intermediate(lon, lat, az12, del_s=dist, npts=npts, + out_lons=None, out_lats=None, radians=False) + list_lat.append(points.lats) + list_lon.append(points.lons) + + new_lats = np.stack(list_lat) + new_lons = np.stack(list_lon) + if transpose: + new_lats = new_lats.T + new_lons = new_lons.T + return new_lons, new_lats + class DynamicAreaDefinition(object): """An AreaDefintion containing just a subset of the needed parameters. 
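# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the diff above): the geodesic extrapolation
# idea behind `_get_extended_lonlats`, reduced to a single extra row. The grid
# values below are made up; only the standard `pyproj.Geod.inv` and
# `pyproj.Geod.fwd` calls are assumed.
import numpy as np
import pyproj

geod = pyproj.Geod(ellps='sphere')
# Innermost row and edge row of a lon/lat grid (one entry per swath column).
lon_inner = np.array([10.0, 10.5, 11.0])
lat_inner = np.array([45.0, 45.0, 45.0])
lon_edge = np.array([10.0, 10.5, 11.0])
lat_edge = np.array([45.5, 45.5, 45.5])
# Forward azimuth and distance from the inner row to the edge row ...
az12, _, dist = geod.inv(lon_inner, lat_inner, lon_edge, lat_edge)
# ... reused to extrapolate one additional row beyond the swath edge.
new_lon, new_lat, _ = geod.fwd(lon_edge, lat_edge, az12, dist)
# ---------------------------------------------------------------------------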
@@ -1814,91 +1820,91 @@ def aggregate(self, **dims): width = int(self.width / dims.get('x', 1)) height = int(self.height / dims.get('y', 1)) return self.copy(height=height, width=width) - + def upsample(self, x=1, y=1): """Return an upsampled version of the area.""" width = int(self.width * x) height = int(self.height * y) return self.copy(height=height, width=width) - - def extend(self, x=0, y=0): + + def extend(self, x=0, y=0): """Extend AreaDef by x/y pixels on each side.""" - if self.is_geostationary: + if self.is_geostationary: raise NotImplementedError("'extend' method is not implemented for GEO AreaDefinition.") - # Check input validity + # Check input validity x = int(x) y = int(y) - if x < 0 or y < 0: + if x < 0 or y < 0: raise ValueError('x and y arguments must be positive integers.') - + # Retrieve pixel and area info - new_width = self.width + 2*x - new_height = self.height + 2*y + new_width = self.width + 2 * x + new_height = self.height + 2 * y pixel_size_x = self.pixel_size_x pixel_size_y = self.pixel_size_y # Extend area_extent (lower_left_x, lower_left_y, upper_right_x, upper_right_y) area_extent = self._area_extent new_area_extent = list(area_extent) - new_area_extent[0] = new_area_extent[0] - pixel_size_x*x - new_area_extent[1] = new_area_extent[1] - pixel_size_y*y - new_area_extent[2] = new_area_extent[2] + pixel_size_x*x - new_area_extent[3] = new_area_extent[3] + pixel_size_y*y - # Define new AreaDefinition - projection = self.crs_wkt + new_area_extent[0] = new_area_extent[0] - pixel_size_x * x + new_area_extent[1] = new_area_extent[1] - pixel_size_y * y + new_area_extent[2] = new_area_extent[2] + pixel_size_x * x + new_area_extent[3] = new_area_extent[3] + pixel_size_y * y + # Define new AreaDefinition + projection = self.crs_wkt area_def = AreaDefinition(self.area_id, self.description, self.proj_id, - projection=projection, + projection=projection, width=new_width, height=new_height, - area_extent=new_area_extent, + area_extent=new_area_extent, rotation=self.rotation, - nprocs=self.nprocs, + nprocs=self.nprocs, dtype=self.dtype) - + return area_def - + def reduce(self, x=0, y=0): """Reduce AreaDef by x/y pixels on each side.""" - if self.is_geostationary: + if self.is_geostationary: raise NotImplementedError("'reduce' method is not implemented for GEO AreaDefinition.") # Check input validity (ensure reduced area is at least 2x2) width = self.width - height = self.height + height = self.height x = int(x) y = int(y) - if x < 0 or y < 0: + if x < 0 or y < 0: raise ValueError('x and y arguments must be positive integers.') - if x >= np.floor(width/2): - max_x = int(np.floor(width/2)) - 1 + if x >= np.floor(width / 2): + max_x = int(np.floor(width / 2)) - 1 raise ValueError("You can at maximum reduce width (x) of AreaDef by {} pixels on each side.".format(max_x)) - if y >= np.floor(height/2): - max_y = int(np.floor(height/2)) - 1 + if y >= np.floor(height / 2): + max_y = int(np.floor(height / 2)) - 1 raise ValueError("You can at maximum reduce height (y) of AreaDef by {} pixels on each side.".format(max_y)) - - # Retrieve pixel and area info - new_width = self.width - 2*x - new_height = self.height - 2*y + + # Retrieve pixel and area info + new_width = self.width - 2 * x + new_height = self.height - 2 * y pixel_size_x = self.pixel_size_x pixel_size_y = self.pixel_size_y area_extent = self._area_extent # Extend area_extent (lower_left_x, lower_left_y, upper_right_x, upper_right_y) new_area_extent = list(area_extent) - new_area_extent[0] = new_area_extent[0] + 
pixel_size_x*x - new_area_extent[1] = new_area_extent[1] + pixel_size_y*y - new_area_extent[2] = new_area_extent[2] - pixel_size_x*x - new_area_extent[3] = new_area_extent[3] - pixel_size_y*y - # Define new AreaDefinition - projection = self.crs_wkt + new_area_extent[0] = new_area_extent[0] + pixel_size_x * x + new_area_extent[1] = new_area_extent[1] + pixel_size_y * y + new_area_extent[2] = new_area_extent[2] - pixel_size_x * x + new_area_extent[3] = new_area_extent[3] - pixel_size_y * y + # Define new AreaDefinition + projection = self.crs_wkt area_def = AreaDefinition(self.area_id, self.description, self.proj_id, - projection=projection, + projection=projection, width=new_width, height=new_height, - area_extent=new_area_extent, + area_extent=new_area_extent, rotation=self.rotation, - nprocs=self.nprocs, + nprocs=self.nprocs, dtype=self.dtype) - + return area_def - + @property def resolution(self): """Return area resolution in X and Y direction.""" @@ -3242,6 +3248,7 @@ def update_hash(self, the_hash=None): the_hash = areadef.update_hash(the_hash) return the_hash + def _get_slice(segments, shape): """Segment a 1D or 2D array.""" if not (1 <= len(shape) <= 2): diff --git a/pyresample/test/test_geometry.py b/pyresample/test/test_geometry.py index bb6e96f22..59e07beed 100644 --- a/pyresample/test/test_geometry.py +++ b/pyresample/test/test_geometry.py @@ -37,6 +37,7 @@ ) from pyresample.test.utils import catch_warnings + class Test(unittest.TestCase): """Unit testing the geometry and geo_filter modules.""" @@ -1884,37 +1885,37 @@ def test_aggregation(self): lats_dask = da.from_array(lats, chunks=2) lons_dask = da.from_array(lons, chunks=2) lats_xr = xr.DataArray(lats, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lons_xr = xr.DataArray(lons, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lons_xr_dask = xr.DataArray(lons_dask, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) sd_np = SwathDefinition(lons, lats) sd_xr = SwathDefinition(lons_xr, lats_xr) sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) - + res_np = sd_np.aggregate(y=window_size, x=window_size) res_xr = sd_xr.aggregate(y=window_size, x=window_size) - res_xr_dask = sd_xr_dask.aggregate(y=window_size, x=window_size) - + res_xr_dask = sd_xr_dask.aggregate(y=window_size, x=window_size) + assert isinstance(res_np.lons, np.ndarray) assert isinstance(res_xr.lats, xr.DataArray) assert isinstance(res_xr_dask.lats, xr.DataArray) assert isinstance(res_xr.lats.data, np.ndarray) assert isinstance(res_xr_dask.lats.data, da.Array) - + np.testing.assert_allclose(res_np.lons, [[179, -179]]) np.testing.assert_allclose(res_np.lats, [[0.5, 0.5]], atol=2e-5) np.testing.assert_allclose(res_xr.lons.data, res_np.lons) np.testing.assert_allclose(res_xr.lats.data, res_np.lats) np.testing.assert_allclose(res_xr_dask.lons.values, res_np.lons) np.testing.assert_allclose(res_xr_dask.lats.values, res_np.lats) - + self.assertAlmostEqual(res_xr.lons.resolution, resolution * window_size) self.assertAlmostEqual(res_xr.lats.resolution, resolution * window_size) - + def test_upsampling(self): """Test upsampling on SwathDefinitions.""" import dask.array as da @@ -1923,77 +1924,77 @@ def test_upsampling(self): from pyresample.geometry import SwathDefinition window_size = 2 resolution = 4 - - lons = 
np.array([5.0,9.0]) - lats = np.array([6.0,4.0]) + + lons = np.array([5.0, 9.0]) + lats = np.array([6.0, 4.0]) lons, lats = np.meshgrid(lons, lats) - + lats_dask = da.from_array(lats, chunks=2) lons_dask = da.from_array(lons, chunks=2) lats_xr = xr.DataArray(lats, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lons_xr = xr.DataArray(lons, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lons_xr_dask = xr.DataArray(lons_dask, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) sd_np = SwathDefinition(lons, lats) sd_xr = SwathDefinition(lons_xr, lats_xr) sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) - + res_np = sd_np.upsample(y=window_size, x=window_size) res_xr = sd_xr.upsample(y=window_size, x=window_size) - res_xr_dask = sd_xr_dask.upsample(y=window_size, x=window_size) - + res_xr_dask = sd_xr_dask.upsample(y=window_size, x=window_size) + assert isinstance(res_np.lons, np.ndarray) assert isinstance(res_xr.lats, xr.DataArray) assert isinstance(res_xr_dask.lats, xr.DataArray) assert isinstance(res_xr.lats.data, np.ndarray) assert isinstance(res_xr_dask.lats.data, da.Array) - np.testing.assert_allclose(res_np.lons[0,:], [4,6,8,10], atol=1e-2) - np.testing.assert_allclose(res_np.lats[:,1], [6.5, 5.5, 4.5, 3.5], atol=1e-2) + np.testing.assert_allclose(res_np.lons[0, :], [4, 6, 8, 10], atol=1e-2) + np.testing.assert_allclose(res_np.lats[:, 1], [6.5, 5.5, 4.5, 3.5], atol=1e-2) np.testing.assert_allclose(res_xr.lons.data, res_np.lons) np.testing.assert_allclose(res_xr.lats.data, res_np.lats) np.testing.assert_allclose(res_xr_dask.lons.values, res_np.lons) np.testing.assert_allclose(res_xr_dask.lats.values, res_np.lats) - + self.assertAlmostEqual(res_xr.lons.resolution, resolution / window_size) - self.assertAlmostEqual(res_xr.lats.resolution, resolution / window_size) - + self.assertAlmostEqual(res_xr.lats.resolution, resolution / window_size) + def test_extend(self): """Test extend on SwathDefinitions.""" import dask.array as da import numpy as np import xarray as xr from pyresample.geometry import SwathDefinition - x = 2 + x = 2 y = 2 resolution = 4 - + lons = np.arange(-179.5, -178.5, 0.5) - lats = np.arange(-89.5, -88.5, 0.5) + lats = np.arange(-89.5, -88.5, 0.5) lons, lats = np.meshgrid(lons, lats) - + lats_dask = da.from_array(lats, chunks=2) lons_dask = da.from_array(lons, chunks=2) lats_xr = xr.DataArray(lats, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lons_xr = xr.DataArray(lons, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lons_xr_dask = xr.DataArray(lons_dask, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) sd_np = SwathDefinition(lons, lats) sd_xr = SwathDefinition(lons_xr, lats_xr) sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) - + res_np = sd_np.extend(y=y, x=x) res_xr = sd_xr.extend(y=y, x=x) - res_xr_dask = sd_xr_dask.extend(y=y, x=x) - + res_xr_dask = sd_xr_dask.extend(y=y, x=x) + assert isinstance(res_np.lons, np.ndarray) assert isinstance(res_xr.lats, xr.DataArray) assert isinstance(res_xr_dask.lats, xr.DataArray) @@ -2002,63 +2003,63 @@ def test_extend(self): 
np.testing.assert_allclose(res_np.lons[2, 0:3], [179.5, 180, -179.5], atol=1e-4) np.testing.assert_allclose(res_np.lons[0, :], [-0.5, 0, 0.5, 1, 1.5, 2], atol=1e-4) - np.testing.assert_allclose(res_np.lats[:, 0], [-89.5, -90.0, -89.5, -89, -88.5, -88.0], atol=1e-3) + np.testing.assert_allclose(res_np.lats[:, 0], [-89.5, -90.0, -89.5, -89, -88.5, -88.0], atol=1e-3) np.testing.assert_allclose(res_xr.lons.data, res_np.lons) np.testing.assert_allclose(res_xr.lats.data, res_np.lats) np.testing.assert_allclose(res_xr_dask.lons.values, res_np.lons) np.testing.assert_allclose(res_xr_dask.lats.values, res_np.lats) - + self.assertAlmostEqual(res_xr.lons.resolution, resolution) - self.assertAlmostEqual(res_xr.lats.resolution, resolution) - + self.assertAlmostEqual(res_xr.lats.resolution, resolution) + def test_reduce(self): """Test reduce on SwathDefinitions.""" import dask.array as da import numpy as np import xarray as xr from pyresample.geometry import SwathDefinition - x = 1 + x = 1 y = 0 resolution = 4 - + lons = np.arange(-179.5, -177.5, 0.5) - lats = np.arange(-89.5, -88.0, 0.5) + lats = np.arange(-89.5, -88.0, 0.5) lons, lats = np.meshgrid(lons, lats) - + lats_dask = da.from_array(lats, chunks=2) lons_dask = da.from_array(lons, chunks=2) lats_xr = xr.DataArray(lats, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lons_xr = xr.DataArray(lons, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) lons_xr_dask = xr.DataArray(lons_dask, dims=['y', 'x'], - attrs={'resolution': resolution}) + attrs={'resolution': resolution}) sd_np = SwathDefinition(lons, lats) sd_xr = SwathDefinition(lons_xr, lats_xr) sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) - + res_np = sd_np.reduce(y=y, x=x) res_xr = sd_xr.reduce(y=y, x=x) - res_xr_dask = sd_xr_dask.reduce(y=y, x=x) - + res_xr_dask = sd_xr_dask.reduce(y=y, x=x) + assert isinstance(res_np.lons, np.ndarray) assert isinstance(res_xr.lats, xr.DataArray) assert isinstance(res_xr_dask.lats, xr.DataArray) assert isinstance(res_xr.lats.data, np.ndarray) assert isinstance(res_xr_dask.lats.data, da.Array) - np.testing.assert_allclose(res_np.lons[:, 0], lons[:,x] ) - np.testing.assert_allclose(res_np.lons[:, -1], lons[:,-x-1]) + np.testing.assert_allclose(res_np.lons[:, 0], lons[:, x]) + np.testing.assert_allclose(res_np.lons[:, -1], lons[:, -x - 1]) np.testing.assert_allclose(res_xr.lons.data, res_np.lons) np.testing.assert_allclose(res_xr.lats.data, res_np.lats) np.testing.assert_allclose(res_xr_dask.lons.values, res_np.lons) np.testing.assert_allclose(res_xr_dask.lats.values, res_np.lats) - + self.assertAlmostEqual(res_xr.lons.resolution, resolution) - self.assertAlmostEqual(res_xr.lats.resolution, resolution) - + self.assertAlmostEqual(res_xr.lats.resolution, resolution) + def test_striding(self): """Test striding.""" import dask.array as da @@ -2727,7 +2728,7 @@ def test_aggregate(self): np.testing.assert_allclose(res.area_extent, area.area_extent) self.assertEqual(res.shape[0], area.shape[0] / 2) self.assertEqual(res.shape[1], area.shape[1] / 4) - + def test_upsample(self): """Test upsample of AreaDefinitions.""" area = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD', @@ -2747,8 +2748,8 @@ def test_upsample(self): self.assertDictEqual(res.proj_dict, area.proj_dict) np.testing.assert_allclose(res.area_extent, area.area_extent) 
self.assertEqual(res.shape[0], area.shape[0] * 2) - self.assertEqual(res.shape[1], area.shape[1] * 4) - + self.assertEqual(res.shape[1], area.shape[1] * 4) + def test_reduce(self): """Test reduce of AreaDefinitions.""" from pyresample import geometry @@ -2765,13 +2766,13 @@ def test_reduce(self): -909968.64000000001, 1029087.28, 1490031.3600000001]) - x = 8 + x = 8 y = 2 res = area.reduce(x=x, y=y) self.assertDictEqual(res.proj_dict, area.proj_dict) - self.assertEqual(res.shape[0], area.shape[0] - y* 2) - self.assertEqual(res.shape[1], area.shape[1] - x* 2) - + self.assertEqual(res.shape[0], area.shape[0] - y * 2) + self.assertEqual(res.shape[1], area.shape[1] - x * 2) + res = area.reduce(x=0, y=0) np.testing.assert_allclose(res.area_extent, area.area_extent) @@ -2791,16 +2792,17 @@ def test_extend(self): -909968.64000000001, 1029087.28, 1490031.3600000001]) - x = 8 + x = 8 y = 2 res = area.extend(x=x, y=y) self.assertDictEqual(res.proj_dict, area.proj_dict) - self.assertEqual(res.shape[0], area.shape[0] + y* 2) - self.assertEqual(res.shape[1], area.shape[1] + x* 2) - + self.assertEqual(res.shape[0], area.shape[0] + y * 2) + self.assertEqual(res.shape[1], area.shape[1] + x * 2) + res = area.extend(x=0, y=0) np.testing.assert_allclose(res.area_extent, area.area_extent) + def test_enclose_areas(): """Test enclosing areas.""" from pyresample.geometry import create_area_def, enclose_areas @@ -3089,4 +3091,3 @@ def _is_clockwise(lons, lats): edge_sum += xdiff * ysum prev_point = point return edge_sum > 0 - From a56e49e220b115d02c7ae5010c286c606c2d57ae Mon Sep 17 00:00:00 2001 From: ghiggi Date: Tue, 18 Jan 2022 16:13:07 +0100 Subject: [PATCH 05/12] Fix last lint error remnants --- pyresample/geometry.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyresample/geometry.py b/pyresample/geometry.py index 5b79ced9d..c716b02d0 100644 --- a/pyresample/geometry.py +++ b/pyresample/geometry.py @@ -937,6 +937,7 @@ def _upsample_centroid(centroid, x_factor=1, y_factor=1): def extend(self, x=0, y=0): """Extend the swath definition along x (along-track) and y (across-track) dimensions. + By default, it does not extend on any direction. To extend of n pixel on both sides of the across-track direction, call swath_def.extend(x=0, y=2). """ @@ -1007,7 +1008,7 @@ def reduce(self, x=0, y=0): raise ValueError('x and y arguments must be positive integers.') if x >= np.floor(width / 2): max_x = int(np.floor(width / 2)) - 1 - raise ValueError("""You can at maximum reduce the along-track direction (x) + raise ValueError("""You can at maximum reduce the along-track direction (x) of SwathDef by {} pixels on each side.""".format(max_x)) if y >= np.floor(height / 2): max_y = int(np.floor(height / 2)) - 1 @@ -1168,6 +1169,7 @@ def compute_optimal_bb_area(self, proj_dict=None): def _convert_2D_array(arr, to, dims=None): """ Convert a 2D array to a specific format. + Useful to return swath lons, lats in the same original format after processing. Parameters @@ -1263,6 +1265,7 @@ def _convert_2D_array(arr, to, dims=None): def _get_extended_lonlats(lon_start, lat_start, lon_end, lat_end, npts, transpose=True): """Utils employed by SwathDefinition.extend. + It extrapolate npts following the forward azimuth with an interdistance equal to the distance between the starting point and the end point. 
""" From 574c2d0309d724e7935b4a173781be85f844dc44 Mon Sep 17 00:00:00 2001 From: ghiggi Date: Tue, 18 Jan 2022 16:16:26 +0100 Subject: [PATCH 06/12] Fix last lint error remnants - part2 --- pyresample/geometry.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pyresample/geometry.py b/pyresample/geometry.py index c716b02d0..d7a3fb907 100644 --- a/pyresample/geometry.py +++ b/pyresample/geometry.py @@ -937,7 +937,7 @@ def _upsample_centroid(centroid, x_factor=1, y_factor=1): def extend(self, x=0, y=0): """Extend the swath definition along x (along-track) and y (across-track) dimensions. - + By default, it does not extend on any direction. To extend of n pixel on both sides of the across-track direction, call swath_def.extend(x=0, y=2). """ @@ -1169,7 +1169,7 @@ def compute_optimal_bb_area(self, proj_dict=None): def _convert_2D_array(arr, to, dims=None): """ Convert a 2D array to a specific format. - + Useful to return swath lons, lats in the same original format after processing. Parameters @@ -1262,10 +1262,9 @@ def _convert_2D_array(arr, to, dims=None): else: raise NotImplementedError - def _get_extended_lonlats(lon_start, lat_start, lon_end, lat_end, npts, transpose=True): """Utils employed by SwathDefinition.extend. - + It extrapolate npts following the forward azimuth with an interdistance equal to the distance between the starting point and the end point. """ From 76467a247676bee0e40b63bd85aae61b253801e7 Mon Sep 17 00:00:00 2001 From: ghiggi Date: Tue, 18 Jan 2022 16:18:20 +0100 Subject: [PATCH 07/12] Fix last lint error remnants - parte --- pyresample/geometry.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyresample/geometry.py b/pyresample/geometry.py index d7a3fb907..110e7e73f 100644 --- a/pyresample/geometry.py +++ b/pyresample/geometry.py @@ -1169,7 +1169,7 @@ def compute_optimal_bb_area(self, proj_dict=None): def _convert_2D_array(arr, to, dims=None): """ Convert a 2D array to a specific format. - + Useful to return swath lons, lats in the same original format after processing. Parameters @@ -1262,9 +1262,10 @@ def _convert_2D_array(arr, to, dims=None): else: raise NotImplementedError + def _get_extended_lonlats(lon_start, lat_start, lon_end, lat_end, npts, transpose=True): """Utils employed by SwathDefinition.extend. - + It extrapolate npts following the forward azimuth with an interdistance equal to the distance between the starting point and the end point. """ From cd4eab5585e6388819df74afdec1e507cc96891a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 18 Jan 2022 15:48:43 +0000 Subject: [PATCH 08/12] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- pyresample/geometry.py | 9 ++++++--- pyresample/test/test_geometry.py | 4 ++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/pyresample/geometry.py b/pyresample/geometry.py index 110e7e73f..77026be54 100644 --- a/pyresample/geometry.py +++ b/pyresample/geometry.py @@ -738,9 +738,9 @@ def aggregate(self, x=1, y=1, **kwargs): This can be changed by passing boundary="trim" or boundary="pad", but behaviour within pyresample is undefined. See https://xarray.pydata.org/en/stable/generated/xarray.DataArray.coarsen.html for further details. 
""" - import xarray as xr import dask.array as da import pyproj + import xarray as xr # Check input validity x = int(x) @@ -809,10 +809,11 @@ def upsample(self, x=1, y=1): # TODO: An alternative would be to use geotiepoints.geointerpolator.GeoInterpolator # But I have some problem using it, see code snippet in the PR description. import dask.array as da - import xarray as xr import numpy as np import pyproj + import xarray as xr from xarray.plot.utils import _infer_interval_breaks + # https://github.com/pydata/xarray/blob/main/xarray/plot/utils.py#L784 def _upsample_ranges_1D(x, factor=1): @@ -942,6 +943,7 @@ def extend(self, x=0, y=0): To extend of n pixel on both sides of the across-track direction, call swath_def.extend(x=0, y=2). """ import xarray as xr + # Check input validity x = int(x) y = int(y) @@ -1193,9 +1195,10 @@ def _convert_2D_array(arr, to, dims=None): The source format of the 2D array. """ - import numpy as np import dask.array as da + import numpy as np import xarray as xr + # Checks valid_format = ['Numpy', 'Dask', 'DataArray_Numpy', 'DataArray_Dask'] if not isinstance(to, str): diff --git a/pyresample/test/test_geometry.py b/pyresample/test/test_geometry.py index 59e07beed..99c0fe4a9 100644 --- a/pyresample/test/test_geometry.py +++ b/pyresample/test/test_geometry.py @@ -1877,6 +1877,7 @@ def test_aggregation(self): import dask.array as da import numpy as np import xarray as xr + from pyresample.geometry import SwathDefinition window_size = 2 resolution = 3 @@ -1921,6 +1922,7 @@ def test_upsampling(self): import dask.array as da import numpy as np import xarray as xr + from pyresample.geometry import SwathDefinition window_size = 2 resolution = 4 @@ -1968,6 +1970,7 @@ def test_extend(self): import dask.array as da import numpy as np import xarray as xr + from pyresample.geometry import SwathDefinition x = 2 y = 2 @@ -2017,6 +2020,7 @@ def test_reduce(self): import dask.array as da import numpy as np import xarray as xr + from pyresample.geometry import SwathDefinition x = 1 y = 0 From f2530f5bea1b1e362426dcafa92e9f943ea7668b Mon Sep 17 00:00:00 2001 From: ghiggi Date: Tue, 18 Jan 2022 17:31:11 +0100 Subject: [PATCH 09/12] Add tests for 2D array conversion between formats. 
--- pyresample/test/test_geometry.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/pyresample/test/test_geometry.py b/pyresample/test/test_geometry.py index 99c0fe4a9..31e21be14 100644 --- a/pyresample/test/test_geometry.py +++ b/pyresample/test/test_geometry.py @@ -2064,6 +2064,36 @@ def test_reduce(self): self.assertAlmostEqual(res_xr.lons.resolution, resolution) self.assertAlmostEqual(res_xr.lats.resolution, resolution) + def test_latslons_arr_conversion(self): + """Test conversion of 2D arrays between various formats.""" + import dask.array as da + import numpy as np + import xarray as xr + + from pyresample.geometry import _convert_2D_array + + lons = np.arange(-179.5, -177.5, 0.5) + lats = np.arange(-89.5, -88.0, 0.5) + lons, lats = np.meshgrid(lons, lats) + lats_dask = da.from_array(lats, chunks=2) + lats_xr = xr.DataArray(lats, dims=['y', 'x']) + lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x']) + valid_format = ['Numpy', 'Dask', 'DataArray_Numpy', 'DataArray_Dask'] + dict_format = {'Numpy': lats, + 'Dask': lats_dask, + 'DataArray_Numpy': lats_xr, + 'DataArray_Dask': lats_xr_dask + } + for in_format in valid_format: + for out_format in valid_format: + out_arr, src_format = _convert_2D_array(dict_format[in_format], to=out_format, dims=None) + assert isinstance(out_arr, type(dict_format[out_format])) + assert src_format.lower() == in_format.lower() + if out_format.lower() == "dataarray_numpy": + assert isinstance(out_arr.data, type(dict_format['Numpy'])) + if out_format.lower() == "dataarray_dask": + assert isinstance(out_arr.data, type(dict_format['Dask'])) + def test_striding(self): """Test striding.""" import dask.array as da From 2bb5b8cd690d089cc39e143a32b57ed3ce636e4c Mon Sep 17 00:00:00 2001 From: ghiggi Date: Wed, 19 Jan 2022 11:36:21 +0100 Subject: [PATCH 10/12] Increase test coverage --- pyresample/geometry.py | 62 +++++++--- pyresample/test/test_geometry.py | 205 +++++++++++++++++++++++++++++-- 2 files changed, 241 insertions(+), 26 deletions(-) diff --git a/pyresample/geometry.py b/pyresample/geometry.py index 77026be54..45af21bd9 100644 --- a/pyresample/geometry.py +++ b/pyresample/geometry.py @@ -728,12 +728,12 @@ def _do_transform(src, dst, lons, lats, alt): # x, y, z = transformer.transform(lons, lats, alt, radians=False) # return np.dstack((x, y, z)) - def aggregate(self, x=1, y=1, **kwargs): + def downsample(self, x=1, y=1, **kwargs): """Downsample the swath definition by averaging the coordinates along x and y dimension. Builds upon xarray.DataArray.coarsen function. - To downsample of a factor of 2, call swath_def.aggregate(x=2, y=2) - swath_def.aggregate(x=1, y=1) simply returns the current swath_def. + To downsample of a factor of 2, call swath_def.downsample(x=2, y=2) + swath_def.downsample(x=1, y=1) simply returns the current swath_def. By default, it raise a ValueError if the dimension size is not a multiple of the window size. This can be changed by passing boundary="trim" or boundary="pad", but behaviour within pyresample is undefined. See https://xarray.pydata.org/en/stable/generated/xarray.DataArray.coarsen.html for further details. 
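# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the diff): how the renamed SwathDefinition
# API reads once this series is applied. The 4x4 grid below is made up; the
# output shapes follow the docstrings and tests in this PR (downsample
# averages x*y windows, upsample splits each pixel into x*y pixels).
import numpy as np
from pyresample.geometry import SwathDefinition

lons, lats = np.meshgrid(np.linspace(10.0, 11.5, 4), np.linspace(45.0, 46.5, 4))
swath = SwathDefinition(lons, lats)
coarse = swath.downsample(x=2, y=2)  # (4, 4) lon/lat arrays -> (2, 2)
fine = swath.upsample(x=2, y=2)      # (4, 4) lon/lat arrays -> (8, 8)
# ---------------------------------------------------------------------------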
@@ -748,7 +748,7 @@ def aggregate(self, x=1, y=1, **kwargs): if x < 1 or y < 1: raise ValueError('x and y arguments must be positive integers larger or equal to 1.') - # Return SwathDefinition if nothing to aggregate + # Return SwathDefinition if nothing to downsample if x == 1 and y == 1: return self @@ -815,6 +815,15 @@ def upsample(self, x=1, y=1): from xarray.plot.utils import _infer_interval_breaks # https://github.com/pydata/xarray/blob/main/xarray/plot/utils.py#L784 + # Check input validity + x = int(x) + y = int(y) + if x < 1 or y < 1: + raise ValueError('x and y arguments must be positive integers larger or equal to 1.') + + # Return SwathDefinition if nothing to upsample + if x == 1 and y == 1: + return self def _upsample_ranges_1D(x, factor=1): ranges2D = np.linspace(x[:-1], x[1:], num=factor, endpoint=False, axis=1) @@ -1215,10 +1224,8 @@ def _convert_2D_array(arr, to, dims=None): dst_arr = da.from_array(arr) elif to.lower() == 'dataarray_numpy': dst_arr = xr.DataArray(arr, dims=dims) - elif to.lower() == 'dataarray_dask': + else: # to.lower() == 'dataarray_dask': dst_arr = xr.DataArray(da.from_array(arr), dims=dims) - else: - raise NotImplementedError return dst_arr, 'numpy' # Dask elif isinstance(arr, da.Array): @@ -1228,10 +1235,8 @@ def _convert_2D_array(arr, to, dims=None): dst_arr = arr elif to.lower() == 'dataarray_numpy': dst_arr = xr.DataArray(arr.compute(), dims=dims) - elif to.lower() == 'dataarray_dask': + else: # to.lower() == 'dataarray_dask': dst_arr = xr.DataArray(arr, dims=dims) - else: - raise NotImplementedError return dst_arr, 'dask' # DataArray_Numpy @@ -1242,10 +1247,8 @@ def _convert_2D_array(arr, to, dims=None): dst_arr = da.from_array(arr.data) elif to.lower() == 'dataarray_numpy': dst_arr = arr - elif to.lower() == 'dataarray_dask': + else: # to.lower() == 'dataarray_dask': dst_arr = xr.DataArray(da.from_array(arr.data), dims=dims) - else: - raise NotImplementedError return dst_arr, 'DataArray_Numpy' # DataArray_Dask @@ -1256,10 +1259,8 @@ def _convert_2D_array(arr, to, dims=None): dst_arr = arr.data elif to.lower() == 'dataarray_numpy': dst_arr = arr.compute() - elif to.lower() == 'dataarray_dask': + else: # to.lower() == 'dataarray_dask': dst_arr = arr - else: - raise NotImplementedError return dst_arr, 'DataArray_Dask' else: @@ -1823,12 +1824,35 @@ def copy(self, **override_kwargs): def aggregate(self, **dims): """Return an aggregated version of the area.""" + warnings.warn("'aggregate' is deprecated, use 'downsample' or 'upsample' instead.", PendingDeprecationWarning) width = int(self.width / dims.get('x', 1)) height = int(self.height / dims.get('y', 1)) return self.copy(height=height, width=width) + def downsample(self, x=1, y=1): + """Return a downsampled version of the area.""" + # Check input validity + x = int(x) + y = int(y) + if x == 1 and y == 1: + return self + if x < 1 or y < 1: + raise ValueError('x and y arguments must be positive integers larger or equal to 1.') + # Downsample + width = int(self.width / x) + height = int(self.height / y) + return self.copy(height=height, width=width) + def upsample(self, x=1, y=1): """Return an upsampled version of the area.""" + # Check input validity + x = int(x) + y = int(y) + if x == 1 and y == 1: + return self + if x < 1 or y < 1: + raise ValueError('x and y arguments must be positive integers larger or equal to 1.') + # Upsample width = int(self.width * x) height = int(self.height * y) return self.copy(height=height, width=width) @@ -1842,7 +1866,8 @@ def extend(self, x=0, y=0): y = int(y) if x < 
0 or y < 0: raise ValueError('x and y arguments must be positive integers.') - + if x == 0 and y == 0: + return self # Retrieve pixel and area info new_width = self.width + 2 * x new_height = self.height + 2 * y @@ -1877,6 +1902,8 @@ def reduce(self, x=0, y=0): height = self.height x = int(x) y = int(y) + if x == 0 and y == 0: + return self if x < 0 or y < 0: raise ValueError('x and y arguments must be positive integers.') if x >= np.floor(width / 2): @@ -1885,7 +1912,6 @@ def reduce(self, x=0, y=0): if y >= np.floor(height / 2): max_y = int(np.floor(height / 2)) - 1 raise ValueError("You can at maximum reduce height (y) of AreaDef by {} pixels on each side.".format(max_y)) - # Retrieve pixel and area info new_width = self.width - 2 * x new_height = self.height - 2 * y diff --git a/pyresample/test/test_geometry.py b/pyresample/test/test_geometry.py index 31e21be14..27fb6115e 100644 --- a/pyresample/test/test_geometry.py +++ b/pyresample/test/test_geometry.py @@ -1872,7 +1872,7 @@ def test_compute_optimal_bb(self): assert_np_dict_allclose(res.proj_dict, proj_dict) self.assertEqual(res.shape, (6, 3)) - def test_aggregation(self): + def test_downsampling(self): """Test aggregation on SwathDefinitions.""" import dask.array as da import numpy as np @@ -1897,11 +1897,11 @@ def test_aggregation(self): sd_xr = SwathDefinition(lons_xr, lats_xr) sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) - res_np = sd_np.aggregate(y=window_size, x=window_size) - res_xr = sd_xr.aggregate(y=window_size, x=window_size) - res_xr_dask = sd_xr_dask.aggregate(y=window_size, x=window_size) + res_np = sd_np.downsample(y=window_size, x=window_size) + res_xr = sd_xr.downsample(y=window_size, x=window_size) + res_xr_dask = sd_xr_dask.downsample(y=window_size, x=window_size) - assert isinstance(res_np.lons, np.ndarray) + assert isinstance(res_np.lats, np.ndarray) assert isinstance(res_xr.lats, xr.DataArray) assert isinstance(res_xr_dask.lats, xr.DataArray) assert isinstance(res_xr.lats.data, np.ndarray) @@ -1916,6 +1916,18 @@ def test_aggregation(self): self.assertAlmostEqual(res_xr.lons.resolution, resolution * window_size) self.assertAlmostEqual(res_xr.lats.resolution, resolution * window_size) + # Test skip aggregation + np.testing.assert_allclose(sd_np.downsample(y=1, x=1).lats, sd_np.lats) + np.testing.assert_allclose(sd_np.downsample(y=1, x=1).lons, sd_np.lons) + # Test invalid arguments + self.assertRaises(ValueError, sd_np.downsample, 0, 0) + self.assertRaises(ValueError, sd_np.downsample, -1, -1) + # Test works with DataArray also without attrs + lats_xr = xr.DataArray(lats, dims=['y', 'x']) + lons_xr = xr.DataArray(lons, dims=['y', 'x']) + sd_xr1 = SwathDefinition(lons_xr, lats_xr) + res_xr1 = sd_xr1.downsample(y=window_size, x=window_size) + np.testing.assert_allclose(res_xr1.lats.data, res_xr.lats.data) def test_upsampling(self): """Test upsampling on SwathDefinitions.""" @@ -1965,6 +1977,19 @@ def test_upsampling(self): self.assertAlmostEqual(res_xr.lons.resolution, resolution / window_size) self.assertAlmostEqual(res_xr.lats.resolution, resolution / window_size) + # Test skip upsampling + np.testing.assert_allclose(sd_np.upsample(y=1, x=1).lats, sd_np.lats) + np.testing.assert_allclose(sd_np.upsample(y=1, x=1).lons, sd_np.lons) + # Test invalid arguments + self.assertRaises(ValueError, sd_np.upsample, 0, 0) + self.assertRaises(ValueError, sd_np.upsample, -1, -1) + # Test works with DataArray also without attrs + lats_xr = xr.DataArray(lats, dims=['y', 'x']) + lons_xr = xr.DataArray(lons, dims=['y', 
'x']) + sd_xr1 = SwathDefinition(lons_xr, lats_xr) + res_xr1 = sd_xr1.upsample(y=window_size, x=window_size) + np.testing.assert_allclose(res_xr1.lats.data, res_xr.lats.data) + def test_extend(self): """Test extend on SwathDefinitions.""" import dask.array as da @@ -2015,6 +2040,18 @@ def test_extend(self): self.assertAlmostEqual(res_xr.lons.resolution, resolution) self.assertAlmostEqual(res_xr.lats.resolution, resolution) + # Test skip extension + np.testing.assert_allclose(sd_np.extend(y=0, x=0).lats, sd_np.lats) + np.testing.assert_allclose(sd_np.extend(y=0, x=0).lons, sd_np.lons) + # Test invalid arguments + self.assertRaises(ValueError, sd_np.extend, -1, -1) + # Test works with DataArray also without attrs + lats_xr = xr.DataArray(lats, dims=['y', 'x']) + lons_xr = xr.DataArray(lons, dims=['y', 'x']) + sd_xr1 = SwathDefinition(lons_xr, lats_xr) + res_xr1 = sd_xr1.extend(y=y, x=x) + np.testing.assert_allclose(res_xr1.lats.data, res_xr.lats.data) + def test_reduce(self): """Test reduce on SwathDefinitions.""" import dask.array as da @@ -2064,6 +2101,21 @@ def test_reduce(self): self.assertAlmostEqual(res_xr.lons.resolution, resolution) self.assertAlmostEqual(res_xr.lats.resolution, resolution) + # Test skip reduction + np.testing.assert_allclose(sd_np.reduce(y=0, x=0).lats, sd_np.lats) + np.testing.assert_allclose(sd_np.reduce(y=0, x=0).lons, sd_np.lons) + # Test invalid arguments + self.assertRaises(ValueError, sd_np.reduce, -1, -1) + # Test works with DataArray also without attrs + lats_xr = xr.DataArray(lats, dims=['y', 'x']) + lons_xr = xr.DataArray(lons, dims=['y', 'x']) + sd_xr1 = SwathDefinition(lons_xr, lats_xr) + res_xr1 = sd_xr1.reduce(y=y, x=x) + np.testing.assert_allclose(res_xr1.lats.data, res_xr.lats.data) + # Test it raise Error if x or y are too large and not ensure output to be 2x2 at least + self.assertRaises(ValueError, sd_np.reduce, lons.shape[1] / 2, 0) + self.assertRaises(ValueError, sd_np.reduce, 0, lons.shape[0] / 2) + def test_latslons_arr_conversion(self): """Test conversion of 2D arrays between various formats.""" import dask.array as da @@ -2072,6 +2124,7 @@ def test_latslons_arr_conversion(self): from pyresample.geometry import _convert_2D_array + # Test all conversion works lons = np.arange(-179.5, -177.5, 0.5) lats = np.arange(-89.5, -88.0, 0.5) lons, lats = np.meshgrid(lons, lats) @@ -2093,6 +2146,26 @@ def test_latslons_arr_conversion(self): assert isinstance(out_arr.data, type(dict_format['Numpy'])) if out_format.lower() == "dataarray_dask": assert isinstance(out_arr.data, type(dict_format['Dask'])) + # Test raise errors + self.assertRaises(TypeError, _convert_2D_array, dict_format['Numpy'], ['unvalid_type']) + self.assertRaises(TypeError, _convert_2D_array, [dict_format['Numpy']], 'numpy') + self.assertRaises(ValueError, _convert_2D_array, dict_format['Numpy'], 'unvalid_format') + + def test_get_extended_lonlats(self): + import numpy as np + + from pyresample.geometry import _get_extended_lonlats + lon_start = np.array([10, 20]) + lon_end = np.array([20, 30]) + lat_start = np.array([0, 1]) + lat_end = np.array([0, 1]) + + npts = 2 + ext_lons, ext_lats = _get_extended_lonlats(lon_start, lat_start, + lon_end, lat_end, + npts, transpose=True) + np.testing.assert_allclose(ext_lons[0, :], [30.0, 40.0]) + np.testing.assert_allclose(ext_lons[1, :], [40.0, 50.0], atol=1e4) def test_striding(self): """Test striding.""" @@ -2763,8 +2836,59 @@ def test_aggregate(self): self.assertEqual(res.shape[0], area.shape[0] / 2) self.assertEqual(res.shape[1], area.shape[1] 
/ 4) + # Test skip aggregate + res = area.aggregate(x=1, y=1) + np.testing.assert_allclose(res.shape, area.shape) + assert res == area + + def test_downsample(self): + """Test downsampling of AreaDefinitions.""" + area = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD', + {'a': '6378144.0', + 'b': '6356759.0', + 'lat_0': '50.00', + 'lat_ts': '50.00', + 'lon_0': '8.00', + 'proj': 'stere'}, + 800, + 800, + [-1370912.72, + -909968.64000000001, + 1029087.28, + 1490031.3600000001]) + res = area.downsample(x=4, y=2) + self.assertDictEqual(res.proj_dict, area.proj_dict) + np.testing.assert_allclose(res.area_extent, area.area_extent) + self.assertEqual(res.shape[0], area.shape[0] / 2) + self.assertEqual(res.shape[1], area.shape[1] / 4) + + # Test skip downsampling + res = area.downsample(x=1, y=1) + np.testing.assert_allclose(res.shape, area.shape) + assert res == area + + # Test invalid arguments + self.assertRaises(ValueError, area.downsample, 0, 0) + self.assertRaises(ValueError, area.downsample, 0, 1) + self.assertRaises(ValueError, area.downsample, 1, 0) + self.assertRaises(ValueError, area.downsample, -1, -1) + + # Test raise NotImplementedError for GEO + # area_geo = geometry.AreaDefinition(area_id='seviri', + # description='SEVIRI HRIT like (flipped, south up)', + # proj_id='seviri', + # projection={'proj': 'geos', + # 'lon_0': 0.0, + # 'a': 6378169.00, + # 'b': 6356583.80, + # 'h': 35785831.00, + # 'units': 'm'}, + # width=123, height=123, + # area_extent=[5500000, 5500000, -5500000, -5500000]) + # self.assertRaises(NotImplementedError, area_geo.downsample, 2, 2) + def test_upsample(self): - """Test upsample of AreaDefinitions.""" + """Test upsampling of AreaDefinitions.""" area = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD', {'a': '6378144.0', 'b': '6356759.0', @@ -2784,8 +2908,33 @@ def test_upsample(self): self.assertEqual(res.shape[0], area.shape[0] * 2) self.assertEqual(res.shape[1], area.shape[1] * 4) + # Test skip upsampling + res = area.upsample(x=1, y=1) + np.testing.assert_allclose(res.shape, area.shape) + assert res == area + + # Test invalid arguments + self.assertRaises(ValueError, area.upsample, 0, 0) + self.assertRaises(ValueError, area.upsample, 0, 1) + self.assertRaises(ValueError, area.upsample, 1, 0) + self.assertRaises(ValueError, area.upsample, -1, -1) + + # # Test raise NotImplementedError for GEO + # area_geo = geometry.AreaDefinition(area_id='seviri', + # description='SEVIRI HRIT like (flipped, south up)', + # proj_id='seviri', + # projection={'proj': 'geos', + # 'lon_0': 0.0, + # 'a': 6378169.00, + # 'b': 6356583.80, + # 'h': 35785831.00, + # 'units': 'm'}, + # width=123, height=123, + # area_extent=[5500000, 5500000, -5500000, -5500000]) + # self.assertRaises(NotImplementedError, area_geo.upsample, 2, 2) + def test_reduce(self): - """Test reduce of AreaDefinitions.""" + """Test reduction of AreaDefinitions.""" from pyresample import geometry area = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD', {'a': '6378144.0', @@ -2807,11 +2956,32 @@ def test_reduce(self): self.assertEqual(res.shape[0], area.shape[0] - y * 2) self.assertEqual(res.shape[1], area.shape[1] - x * 2) + # Test skip reduction res = area.reduce(x=0, y=0) np.testing.assert_allclose(res.area_extent, area.area_extent) + assert res == area + + # Test invalid arguments + self.assertRaises(ValueError, area.reduce, -1, -1) + self.assertRaises(ValueError, area.reduce, area.shape[0] / 2, 0) + self.assertRaises(ValueError, area.reduce, 0, 
area.shape[1] / 2) + + # Test raise NotImplementedError for GEO + area_geo = geometry.AreaDefinition(area_id='seviri', + description='SEVIRI HRIT like (flipped, south up)', + proj_id='seviri', + projection={'proj': 'geos', + 'lon_0': 0.0, + 'a': 6378169.00, + 'b': 6356583.80, + 'h': 35785831.00, + 'units': 'm'}, + width=123, height=123, + area_extent=[5500000, 5500000, -5500000, -5500000]) + self.assertRaises(NotImplementedError, area_geo.reduce, 1, 1) def test_extend(self): - """Test extend of AreaDefinitions.""" + """Test extension of AreaDefinitions.""" from pyresample import geometry area = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD', {'a': '6378144.0', @@ -2833,8 +3003,27 @@ def test_extend(self): self.assertEqual(res.shape[0], area.shape[0] + y * 2) self.assertEqual(res.shape[1], area.shape[1] + x * 2) + # Test skip extension res = area.extend(x=0, y=0) np.testing.assert_allclose(res.area_extent, area.area_extent) + assert res == area + + # Test invalid arguments + self.assertRaises(ValueError, area.extend, -1, -1) + + # Test raise NotImplementedError for GEO + area_geo = geometry.AreaDefinition(area_id='seviri', + description='SEVIRI HRIT like (flipped, south up)', + proj_id='seviri', + projection={'proj': 'geos', + 'lon_0': 0.0, + 'a': 6378169.00, + 'b': 6356583.80, + 'h': 35785831.00, + 'units': 'm'}, + width=123, height=123, + area_extent=[5500000, 5500000, -5500000, -5500000]) + self.assertRaises(NotImplementedError, area_geo.extend, 1, 1) def test_enclose_areas(): From 2e5997c9d7a14111866c0f4c9eca71bc6ba92af6 Mon Sep 17 00:00:00 2001 From: ghiggi Date: Wed, 19 Jan 2022 15:39:56 +0100 Subject: [PATCH 11/12] Refactoring of upsampling utils + coverage --- pyresample/geometry.py | 193 +++++++++++++++++++++---------- pyresample/test/test_geometry.py | 57 +++++++++ 2 files changed, 192 insertions(+), 58 deletions(-) diff --git a/pyresample/geometry.py b/pyresample/geometry.py index 45af21bd9..af76ea1c8 100644 --- a/pyresample/geometry.py +++ b/pyresample/geometry.py @@ -807,7 +807,7 @@ def upsample(self, x=1, y=1): swath_def.upsample(x=1, y=1) simply returns the current swath_def. """ # TODO: An alternative would be to use geotiepoints.geointerpolator.GeoInterpolator - # But I have some problem using it, see code snippet in the PR description. + # But I have some problem using it, see code snippet in a comment of the PR. 
import dask.array as da import numpy as np import pyproj @@ -824,70 +824,31 @@ def upsample(self, x=1, y=1): # Return SwathDefinition if nothing to upsample if x == 1 and y == 1: return self - - def _upsample_ranges_1D(x, factor=1): - ranges2D = np.linspace(x[:-1], x[1:], num=factor, endpoint=False, axis=1) - return np.concatenate((ranges2D.ravel(), [x[-1]])) - - def upsample_ranges_2D(x, factor=1, axis=0): - x = np.array(x) - if x.ndim not in [1, 2]: - raise ValueError("Expects 1D or 2D array.") - if not isinstance(axis, int): - raise TypeError("'axis' must be: 0 or 1 integer.") - if axis not in [0, 1]: - raise ValueError("Expects 'axis' 0 or 1") - if not isinstance(factor, int): - raise TypeError("'factor' must be an integer equal or larger than 1.") - if factor < 1: - raise ValueError("'factor' must be an integer equal or larger than 1.") - - if x.ndim == 1: - return _upsample_ranges_1D(x, factor=factor) - else: - l_ranges = [] - if axis == 1: - for i in range(x.shape[0]): - l_ranges.append(_upsample_ranges_1D(x[i, :], factor=factor)) - return np.vstack(l_ranges) - else: # axis = 0 - for i in range(x.shape[1]): - l_ranges.append(_upsample_ranges_1D(x[:, i], factor=factor)) - return np.vstack(l_ranges).transpose() - - def _upsample_corners(corners, x_factor=1, y_factor=1): - new_breaks_xx = upsample_ranges_2D(corners, factor=x_factor, axis=1) - new_corners = upsample_ranges_2D(new_breaks_xx, factor=y_factor, axis=0) - return new_corners + # --------------------------------------------------------------------. + # TODO: + # - Refactor for dask-compatibility + # - Should we make _infer_interval_breaks dask-compatible? def _get_corners_from_centroids(centroids): breaks_xx = _infer_interval_breaks(centroids, axis=1) corners = _infer_interval_breaks(breaks_xx, axis=0) return corners - def _get_centroids_from_corners(corners): - centroids = (corners[1:, 1:] + corners[:-1, :-1]) / 2 - return centroids - - # TODO: Decide if compute in memory or with dask - def upsample_centroids(centroid_x, centroid_y, centroid_z, x_factor=1, y_factor=1): - corners_x = _get_corners_from_centroids(centroid_x) - corners_y = _get_corners_from_centroids(centroid_y) - corners_z = _get_corners_from_centroids(centroid_z) - x_new_corners = _upsample_corners(corners_x, x_factor=x_factor, y_factor=y_factor) - y_new_corners = _upsample_corners(corners_y, x_factor=x_factor, y_factor=y_factor) - z_new_corners = _upsample_corners(corners_z, x_factor=x_factor, y_factor=y_factor) - x_new_centroids = _get_centroids_from_corners(x_new_corners) - y_new_centroids = _get_centroids_from_corners(y_new_corners) - z_new_centroids = _get_centroids_from_corners(z_new_corners) - return x_new_centroids, y_new_centroids, z_new_centroids - - def _upsample_centroid(centroid, x_factor=1, y_factor=1): + def _upsample_centroid(centroid, x=1, y=1): corners = _get_corners_from_centroids(centroid) - new_corners = _upsample_corners(corners, x_factor=x_factor, y_factor=y_factor) - new_centroids = _get_centroids_from_corners(new_corners) + # Retrieve corners of the the upsampled grid + new_corners = _linspace2D_between_values(corners, num_x=x - 1, num_y=y - 1) + # Get centroids from corners + new_centroids = (new_corners[:-1, :-1] + new_corners[1:, 1:]) / 2 return new_centroids + def upsample_centroids(centroid_x, centroid_y, centroid_z, x=1, y=1): + x_new_centroids = _upsample_centroid(centroid_x, x=x, y=y) + y_new_centroids = _upsample_centroid(centroid_y, x=x, y=y) + z_new_centroids = _upsample_centroid(centroid_z, x=x, y=y) + return 
x_new_centroids, y_new_centroids, z_new_centroids + + # --------------------------------------------------------------------. # Define geodetic and geocentric projection geocent = pyproj.Proj(proj='geocent') latlong = pyproj.Proj(proj='latlong') @@ -913,10 +874,9 @@ def _upsample_centroid(centroid, x_factor=1, y_factor=1): # x, # y) # res1 = xr.DataArray(res1, dims=['y', 'x', 'coord'], coords=src_lons.coords) - res = np.stack(upsample_centroids(res[:, :, 0].data, res[:, :, 1].data, - res[:, :, 2].data, x_factor=x, y_factor=y), axis=2) + res[:, :, 2].data, x=x, y=y), axis=2) new_centroids = xr.DataArray(da.from_array(res), dims=['y', 'x', 'xyz']) # Back-conversion to geographic CRS @@ -1267,6 +1227,123 @@ def _convert_2D_array(arr, to, dims=None): raise NotImplementedError +def _linspace1D_between_values(arr, num=0): + """Dask-friendly function linearly interpolating values between each 1D array values. + + This function does not perform extrapolation. + It expects a 1D array as input! + + Parameters + ---------- + arr : (np.ndarray, dask.array.Array) + Numpy or Dask Array to be linearly interpolated between values. + num : int, optional + The number of linearly spaced values to infer between array values. + The default is 0. + + Returns + ------- + arr : (np.ndarray, dask.array.Array) + Numpy or Dask Array with in-between linearly interpolated values. + + Example + ------- + + Function call: _linspace1D_between_values(arr, num=1) + Input array: + np.array([5.0, 7.0]) + + Output array: + np.array([5.0, 6.0, 7.0]) + """ + import xarray as xr + + # Check input validity + if arr.ndim != 1: + raise ValueError("'_linspace1D_between_values' expects a 1D array.") + num = int(num) + if num < 0: + raise ValueError("'x' and 'y' must be an integer equal or larger than 0.") + if num == 0: + return arr + # Define src and dst ties + dst_N = (arr.size - 1) * (num + 1) + 1 + src_ties = np.arange(dst_N, step=num + 1) + dst_ties = np.arange(dst_N) + # Interpolate + da = xr.DataArray( + data=arr, + dims=("x"), + coords={"x": src_ties} + ) + da_interp = da.interp(x=dst_ties, method="linear") + return da_interp.data + + +def _linspace2D_between_values(arr, num_x=0, num_y=0): + """Dask-friendly function linearly interpolating values between each 2D array values. + + This function does not perform extrapolation. + It expects a 2D array as input! + + Parameters + ---------- + arr : (np.ndarray, dask.array.Array) + Numpy or Dask Array to be linearly interpolated between values. + num_x : int, optional + The number of linearly spaced values to infer between array values (along x). + . The default is 0. + num_y : int, optional + The number of linearly spaced values to infer between array values (along y). + The default is 0. + + Returns + ------- + arr : (np.ndarray, dask.array.Array) + Numpy or Dask Array with in-between linearly interpolated values. 
+ + Example + ------- + + Function call: _linspace1D_between_values(arr, num=1) + Input: + np.array([[5.0, 7.0], + [7.0, 9.0]]) + Output: + np.array([[5.0, 6.0, 7.0], + [6.0, 7.0, 8.0], + [7.0, 8.0, 9.0]]) + """ + import xarray as xr + + # Check input validity + if arr.ndim != 2: + raise ValueError("'_linspace2D_between_values' expects a 2D array.") + num_x = int(num_x) + num_y = int(num_y) + if num_x < 0 or num_y < 0: + raise ValueError("'x' and 'y' must be an integer equal or larger than 0.") + if num_x == 0 and num_y == 0: + return arr + # Define src and dst ties + shape = arr.shape + Nx_dst = (shape[1] - 1) * (num_x + 1) + 1 + Ny_dst = (shape[0] - 1) * (num_y + 1) + 1 + + src_ties_x = np.arange(Nx_dst, step=num_x + 1) + src_ties_y = np.arange(Ny_dst, step=num_y + 1) + dst_ties_x = np.arange(Nx_dst) + dst_ties_y = np.arange(Ny_dst) + # Interpolate + da = xr.DataArray( + data=arr, + dims=("y", "x"), + coords={"y": src_ties_y, "x": src_ties_x} + ) + da_interp = da.interp({"y": dst_ties_y, "x": dst_ties_x}, method="linear") + return da_interp.data + + def _get_extended_lonlats(lon_start, lat_start, lon_end, lat_end, npts, transpose=True): """Utils employed by SwathDefinition.extend. diff --git a/pyresample/test/test_geometry.py b/pyresample/test/test_geometry.py index 27fb6115e..c842f4f00 100644 --- a/pyresample/test/test_geometry.py +++ b/pyresample/test/test_geometry.py @@ -2151,6 +2151,63 @@ def test_latslons_arr_conversion(self): self.assertRaises(TypeError, _convert_2D_array, [dict_format['Numpy']], 'numpy') self.assertRaises(ValueError, _convert_2D_array, dict_format['Numpy'], 'unvalid_format') + def test_linspace1D_between_values(self): + """Test linspace1D_between_values.""" + import dask.array as da + import numpy as np + + from pyresample.geometry import _linspace1D_between_values + arr_np = np.array([5.0, 7.0, 9.0]) + arr_dask = da.from_array(arr_np) + + res_np = _linspace1D_between_values(arr_np, num=1) + res_dask = _linspace1D_between_values(arr_dask, num=1) + + np.testing.assert_allclose(res_np, [5., 6., 7., 8., 9.]) + np.testing.assert_allclose(res_dask, [5., 6., 7., 8., 9.]) + assert isinstance(res_np, np.ndarray) + assert isinstance(res_dask, da.Array) + + # Test for no interpolation inbetween values + res = _linspace1D_between_values(arr_np, num=0) + np.testing.assert_allclose(res, arr_np) + + # Test for valid inputs + self.assertRaises(ValueError, _linspace1D_between_values, arr_np, -1) + self.assertRaises(ValueError, _linspace1D_between_values, np.zeros((2, 2)), 0) + + def test_linspace2D_between_values(self): + """Test linspace2D_between_values.""" + import dask.array as da + import numpy as np + + from pyresample.geometry import _linspace2D_between_values + arr_np = np.array([[5.0, 7.0], + [7.0, 9.0]]) + arr_dask = da.from_array(arr_np) + + res_np = _linspace2D_between_values(arr_np, num_x=1, num_y=3) + res_dask = _linspace2D_between_values(arr_dask, num_x=1, num_y=3) + + output_expected = np.array([[5., 6., 7.], + [5.5, 6.5, 7.5], + [6., 7., 8.], + [6.5, 7.5, 8.5], + [7., 8., 9.]]) + np.testing.assert_allclose(res_np, output_expected) + np.testing.assert_allclose(res_dask, output_expected) + assert isinstance(res_np, np.ndarray) + assert isinstance(res_dask, da.Array) + + # Test for no interpolation inbetween values + res = _linspace2D_between_values(arr_np, num_x=0, num_y=0) + np.testing.assert_allclose(res, arr_np) + + # Test for valid inputs + self.assertRaises(ValueError, _linspace2D_between_values, arr_np, -1, 0) + self.assertRaises(ValueError, 
_linspace2D_between_values, arr_np, 0, -1) + self.assertRaises(ValueError, _linspace2D_between_values, arr_np[0, :], 0, 0) + def test_get_extended_lonlats(self): import numpy as np From e80f44ec28644867d02ca88a475bd9c5353c6f2a Mon Sep 17 00:00:00 2001 From: ghiggi Date: Wed, 2 Feb 2022 18:26:25 +0100 Subject: [PATCH 12/12] PEP fixes --- pyresample/geometry.py | 459 ++++++++++------------------ pyresample/test/test_geometry.py | 288 ++++++++--------- pyresample/test/test_utils_array.py | 122 ++++++++ pyresample/utils/array.py | 150 +++++++++ 4 files changed, 589 insertions(+), 430 deletions(-) create mode 100644 pyresample/test/test_utils_array.py create mode 100644 pyresample/utils/array.py diff --git a/pyresample/geometry.py b/pyresample/geometry.py index af76ea1c8..c026bfe80 100644 --- a/pyresample/geometry.py +++ b/pyresample/geometry.py @@ -27,6 +27,7 @@ from typing import Optional import numpy as np +import pyproj import yaml from pyproj import Geod, transform @@ -41,6 +42,7 @@ proj4_dict_to_str, proj4_radius_parameters, ) +from pyresample.utils.array import _convert_2D_array try: from xarray import DataArray @@ -713,25 +715,30 @@ def copy(self): @staticmethod def _do_transform(src, dst, lons, lats, alt): - """Run pyproj.transform and stack the results.""" - x, y, z = transform(src, dst, lons, lats, alt) + """Perform pyproj transformation and stack the results. + + If using pyproj >= 3.1, it employs thread-safe pyproj.transformer.Transformer. + If using pyproj < 3.1, it employs pyproj.transform. + + Docs: https://pyproj4.github.io/pyproj/stable/advanced_examples.html#multithreading + """ + if float(pyproj.__version__[0:3]) >= 3.1: + from pyproj import Transformer + transformer = Transformer.from_crs(src.crs, dst.crs) + x, y, z = transformer.transform(lons, lats, alt, radians=False) + else: + x, y, z = transform(src, dst, lons, lats, alt) return np.dstack((x, y, z)) - # TODO: this is more efficient than pyproj.transform. - # With pyproj > 3.1 it became thread safe: - # https://pyproj4.github.io/pyproj/stable/advanced_examples.html#multithreading - # @staticmethod - # def swath_do_transform(src, dst, lons, lats, alt): - # """Run pyproj Transformer and stack the results.""" - # from pyproj import Transformer - # transformer = Transformer.from_crs(src.crs, dst.crs) - # x, y, z = transformer.transform(lons, lats, alt, radians=False) - # return np.dstack((x, y, z)) + def aggregate(self, **dims): + """Return an aggregated version of the area.""" + warnings.warn("'aggregate' is deprecated, use 'downsample' instead.", PendingDeprecationWarning) + return self.downsample(x=dims.get('x', 1), y=dims.get('y', 1)) def downsample(self, x=1, y=1, **kwargs): - """Downsample the swath definition by averaging the coordinates along x and y dimension. + """Downsample the SwathDefinition along x (columns) and y (lines) dimensions. - Builds upon xarray.DataArray.coarsen function. + Builds upon xarray.DataArray.coarsen averaging function. To downsample of a factor of 2, call swath_def.downsample(x=2, y=2) swath_def.downsample(x=1, y=1) simply returns the current swath_def. By default, it raise a ValueError if the dimension size is not a multiple of the window size. @@ -739,14 +746,13 @@ def downsample(self, x=1, y=1, **kwargs): See https://xarray.pydata.org/en/stable/generated/xarray.DataArray.coarsen.html for further details. 
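        Example
        -------
        A minimal usage sketch (here swath_def is assumed to be any SwathDefinition
        whose shape is a multiple of the window, e.g. (2, 4)):

        Function call: swath_def.downsample(x=2, y=2)
        Output: a SwathDefinition of shape (1, 2), where each output coordinate is the
        back-projected mean of a 2x2 block of geocentric (x, y, z) coordinates.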
""" import dask.array as da - import pyproj import xarray as xr # Check input validity x = int(x) y = int(y) if x < 1 or y < 1: - raise ValueError('x and y arguments must be positive integers larger or equal to 1.') + raise ValueError('SwathDefinition.downsample expects (integer) aggregation factors >=1 .') # Return SwathDefinition if nothing to downsample if x == 1 and y == 1: @@ -757,8 +763,9 @@ def downsample(self, x=1, y=1, **kwargs): latlong = pyproj.Proj(proj='latlong') # Get xr.DataArray with dask array - src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['y', 'x']) + # - If input lats/lons are xr.DataArray, the specified dims ['x','y'] are ignored src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['y', 'x']) + src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['y', 'x']) # Conversion to Geocentric Cartesian (x,y,z) CRS res = da.map_blocks(self._do_transform, latlong, geocent, @@ -781,14 +788,14 @@ def downsample(self, x=1, y=1, **kwargs): chunks=res.data.chunks) # Back-conversion array as input format - lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['y', 'x']) - lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['y', 'x']) + lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=src_lons.dims) + lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=src_lats.dims) # Add additional info if the source array is a DataArray if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): lats = lats.assign_coords(res.coords) - lats.attrs = self.lats.attrs.copy() lons = lons.assign_coords(res.coords) + lats.attrs = self.lats.attrs.copy() lons.attrs = self.lons.attrs.copy() try: resolution = lons.attrs['resolution'] * ((x + y) / 2) @@ -801,16 +808,17 @@ def downsample(self, x=1, y=1, **kwargs): return SwathDefinition(lons, lats) def upsample(self, x=1, y=1): - """Upsample the swath definition along x (along-track) and y (cross-track) dimensions. + """Upsample the SwathDefinition along x (columns) and y (lines) dimensions. - To upsample of a factor of 2 (each pixel splitted in 4 pixels), call swath_def.upsample(x=2, y=2). + To upsample of a factor of 2 (each pixel splitted in 2x2 pixels), + call swath_def.upsample(x=2, y=2). swath_def.upsample(x=1, y=1) simply returns the current swath_def. """ # TODO: An alternative would be to use geotiepoints.geointerpolator.GeoInterpolator # But I have some problem using it, see code snippet in a comment of the PR. + # TODO: Should we upsample also possible coords of lons/lats input xr.DataArray? 
import dask.array as da import numpy as np - import pyproj import xarray as xr from xarray.plot.utils import _infer_interval_breaks @@ -819,8 +827,7 @@ def upsample(self, x=1, y=1): x = int(x) y = int(y) if x < 1 or y < 1: - raise ValueError('x and y arguments must be positive integers larger or equal to 1.') - + raise ValueError("SwathDefinition.upsample expects (integer) upscaling factors >=1 .") # Return SwathDefinition if nothing to upsample if x == 1 and y == 1: return self @@ -834,6 +841,8 @@ def _get_corners_from_centroids(centroids): corners = _infer_interval_breaks(breaks_xx, axis=0) return corners + # TODO: choose one of the two function below + # - What is the best way to apply _upsample_centroid along each x-y plane with dask def _upsample_centroid(centroid, x=1, y=1): corners = _get_corners_from_centroids(centroid) # Retrieve corners of the the upsampled grid @@ -854,8 +863,9 @@ def upsample_centroids(centroid_x, centroid_y, centroid_z, x=1, y=1): latlong = pyproj.Proj(proj='latlong') # Get xr.DataArray with dask array - src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['y', 'x']) + # - If input lats/lons are xr.DataArray, the specified dims ['x','y'] are ignored src_lons, src_lons_format = _convert_2D_array(self.lons, to='DataArray_Dask', dims=['y', 'x']) + src_lats, src_lats_format = _convert_2D_array(self.lats, to='DataArray_Dask', dims=['y', 'x']) # Conversion to Geocentric Cartesian (x,y,z) CRS res = da.map_blocks(self._do_transform, latlong, geocent, @@ -888,8 +898,8 @@ def upsample_centroids(centroid_x, centroid_y, centroid_z, x=1, y=1): chunks=new_centroids.data.chunks) # Back-conversion array as input format - lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=['y', 'x']) - lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=['y', 'x']) + lons, _ = _convert_2D_array(lonlatalt[:, :, 0], to=src_lons_format, dims=src_lons.dims) + lats, _ = _convert_2D_array(lonlatalt[:, :, 1], to=src_lats_format, dims=src_lats.dims) # Add additional info if the source array is a DataArray if isinstance(self.lats, xr.DataArray) and isinstance(self.lons, xr.DataArray): @@ -905,22 +915,23 @@ def upsample_centroids(centroid_x, centroid_y, centroid_z, x=1, y=1): # Return the downsampled swath definition return SwathDefinition(lons, lats) - def extend(self, x=0, y=0): - """Extend the swath definition along x (along-track) and y (across-track) dimensions. + def extend(self, left=0, right=0, bottom=0, top=0): + """Extend the SwathDefinition of n pixels on specific boundary sides. - By default, it does not extend on any direction. - To extend of n pixel on both sides of the across-track direction, call swath_def.extend(x=0, y=2). + By default, it does not extend on any side. 
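        Example
        -------
        A minimal usage sketch (here swath_def is assumed to be any 2D SwathDefinition):

        Function call: swath_def.extend(left=1, right=1, top=2)
        Output: a SwathDefinition with one geodesically extrapolated column prepended
        and appended and two extrapolated rows prepended, i.e. shape
        (swath_def.shape[0] + 2, swath_def.shape[1] + 2).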
""" import xarray as xr # Check input validity - x = int(x) - y = int(y) - if x < 0 or y < 0: - raise ValueError('x and y arguments must be positive integers.') + left = int(left) + right = int(right) + bottom = int(bottom) + top = int(top) + if left < 0 or right < 0 or bottom < 0 or top < 0: + raise ValueError('SwathDefinition.extend expects positive numbers of pixels.') # Return SwathDefinition if nothing to extend - if x == 0 and y == 0: + if left == 0 and right == 0 and bottom == 0 and top == 0: return self # Get lats/lons numpy arrays @@ -930,26 +941,29 @@ def extend(self, x=0, y=0): dst_lats = src_lats dst_lons = src_lons - # Extend on y direction (side0 and side2) - if y > 0: - list_side0 = (src_lons[1, :], src_lats[1, :], src_lons[0, :], src_lats[0, :]) - list_side2 = (src_lons[-2, :], src_lats[-2, :], src_lons[-1, :], src_lats[-1, :]) - extended_side0_lonlats = _get_extended_lonlats(*list_side0, npts=y) - extended_side2_lonlats = _get_extended_lonlats(*list_side2, npts=y) + # Extend swath sides + if top > 0: + list_side0 = (dst_lons[1, :], dst_lats[1, :], dst_lons[0, :], dst_lats[0, :]) + extended_side0_lonlats = _get_extended_lonlats(*list_side0, npts=top) dst_lats = np.concatenate((extended_side0_lonlats[1][::-1, :], dst_lats), axis=0) - dst_lats = np.concatenate((dst_lats, extended_side2_lonlats[1]), axis=0) dst_lons = np.concatenate((extended_side0_lonlats[0][::-1, :], dst_lons), axis=0) + + if bottom > 0: + list_side2 = (dst_lons[-2, :], dst_lats[-2, :], dst_lons[-1, :], dst_lats[-1, :]) + extended_side2_lonlats = _get_extended_lonlats(*list_side2, npts=bottom) + dst_lats = np.concatenate((dst_lats, extended_side2_lonlats[1]), axis=0) dst_lons = np.concatenate((dst_lons, extended_side2_lonlats[0]), axis=0) - # Extend on x direction (side1 and side3) - if x > 0: + if right > 0: list_side1 = (dst_lons[:, -2], dst_lats[:, -2], dst_lons[:, -1], dst_lats[:, -1]) - list_side3 = (dst_lons[:, 1], dst_lats[:, 1], dst_lons[:, 0], dst_lats[:, 0]) - extended_side1_lonlats = _get_extended_lonlats(*list_side1, npts=x, transpose=False) - extended_side3_lonlats = _get_extended_lonlats(*list_side3, npts=x, transpose=False) + extended_side1_lonlats = _get_extended_lonlats(*list_side1, npts=right, transpose=False) dst_lats = np.concatenate((dst_lats, extended_side1_lonlats[1]), axis=1) - dst_lats = np.concatenate((extended_side3_lonlats[1][:, ::-1], dst_lats), axis=1) dst_lons = np.concatenate((dst_lons, extended_side1_lonlats[0]), axis=1) + + if left > 0: + list_side3 = (dst_lons[:, 1], dst_lats[:, 1], dst_lons[:, 0], dst_lats[:, 0]) + extended_side3_lonlats = _get_extended_lonlats(*list_side3, npts=left, transpose=False) + dst_lats = np.concatenate((extended_side3_lonlats[1][:, ::-1], dst_lats), axis=1) dst_lons = np.concatenate((extended_side3_lonlats[0][:, ::-1], dst_lons), axis=1) # Back-conversion array as input format @@ -964,36 +978,37 @@ def extend(self, x=0, y=0): # Return the extended SwathDefinition return SwathDefinition(lons, lats) - def reduce(self, x=0, y=0): - """Reduce the swath definition along x (along-track) and y (across-track) dimensions. + def shrink(self, left=0, right=0, bottom=0, top=0): + """Shrink the SwathDefinition of n pixels on specific boundary sides. - By default, it does not reduce on any direction. - To reduce of n pixel on both sides of the across-track direction, call swath_def.reduce(x=0, y=2). + By default, it does not shrink on any side. 
""" - # Check input validity (ensure reduced area is at least 2x2) - height = self.lats.shape[0] - width = self.lats.shape[1] - x = int(x) - y = int(y) - if x < 0 or y < 0: - raise ValueError('x and y arguments must be positive integers.') - if x >= np.floor(width / 2): - max_x = int(np.floor(width / 2)) - 1 - raise ValueError("""You can at maximum reduce the along-track direction (x) - of SwathDef by {} pixels on each side.""".format(max_x)) - if y >= np.floor(height / 2): - max_y = int(np.floor(height / 2)) - 1 - raise ValueError("""You can at maximum reduce the across-track direction (y) - of SwathDef by {} pixels on each side.""".format(max_y)) - - # Return SwathDefinition if nothing to reduce - if x == 0 and y == 0: + # Check input validity + left = int(left) + right = int(right) + bottom = int(bottom) + top = int(top) + if left < 0 or right < 0 or bottom < 0 or top < 0: + raise ValueError('SwathDefinition.shrink expects positive numbers of pixels.') + + # Return SwathDefinition if nothing to shrink + if left == 0 and right == 0 and bottom == 0 and top == 0: return self - # Return the reduced SwathDefinition - lats = self.lats[slice(0 + y, height - y), slice(0 + x, width - x)] - lons = self.lons[slice(0 + y, height - y), slice(0 + x, width - x)] - return SwathDefinition(lons, lats) + # Ensure shrinked area is at least 2x2 + height = self.lats.shape[0] + width = self.lats.shape[1] + x_max_shrink = width - 2 + y_max_shrink = height - 2 + if (left + right) > x_max_shrink: + raise ValueError("SwathDefinition.shrink can drop maximum {} pixels " + "along the x direction.".format(x_max_shrink)) + if (top + bottom) > y_max_shrink: + raise ValueError("SwathDefinition.shrink can drop maximum {} pixels " + "along the y direction.".format(y_max_shrink)) + + # Return the shrinked SwathDefinition + return self[slice(top, height - bottom), slice(left, width - right)] def __hash__(self): """Compute the hash of this object.""" @@ -1137,149 +1152,6 @@ def compute_optimal_bb_area(self, proj_dict=None): return area.freeze((lons, lats), shape=(height, width)) -def _convert_2D_array(arr, to, dims=None): - """ - Convert a 2D array to a specific format. - - Useful to return swath lons, lats in the same original format after processing. - - Parameters - ---------- - arr : (np.ndarray, da.Array, xr.DataArray) - The 2D array to be converted to another array format. - to : TYPE - The desired array output format. - Accepted formats are: ['Numpy','Dask', 'DataArray_Numpy','DataArray_Dask'] - dims : tuple, optional - Optional argument for the specification of DataArray dimension names - if input array is Numpy or Dask. - Provide a tuple with (y_dimname, x_dimname). - The default is None --> (dim_0, dim_1) - - Returns - ------- - dst_arr : (np.ndarray, da.Array, xr.DataArray) - The converted 2D array. - src_format: str - The source format of the 2D array. 
- - """ - import dask.array as da - import numpy as np - import xarray as xr - - # Checks - valid_format = ['Numpy', 'Dask', 'DataArray_Numpy', 'DataArray_Dask'] - if not isinstance(to, str): - raise TypeError("'to' must be a string indicating the conversion array format.") - if not np.isin(to.lower(), np.char.lower(valid_format)): - raise ValueError("Valid conversion array formats are {}".format(valid_format)) - if not isinstance(arr, (np.ndarray, da.Array, xr.DataArray)): - raise TypeError("The provided array must be either a np.ndarray, a dask.Array or a xr.DataArray.") - # Numpy - if isinstance(arr, np.ndarray): - if to.lower() == 'numpy': - dst_arr = arr - elif to.lower() == 'dask': - dst_arr = da.from_array(arr) - elif to.lower() == 'dataarray_numpy': - dst_arr = xr.DataArray(arr, dims=dims) - else: # to.lower() == 'dataarray_dask': - dst_arr = xr.DataArray(da.from_array(arr), dims=dims) - return dst_arr, 'numpy' - # Dask - elif isinstance(arr, da.Array): - if to.lower() == 'numpy': - dst_arr = arr.compute() - elif to.lower() == 'dask': - dst_arr = arr - elif to.lower() == 'dataarray_numpy': - dst_arr = xr.DataArray(arr.compute(), dims=dims) - else: # to.lower() == 'dataarray_dask': - dst_arr = xr.DataArray(arr, dims=dims) - return dst_arr, 'dask' - - # DataArray_Numpy - elif isinstance(arr, xr.DataArray) and isinstance(arr.data, np.ndarray): - if to.lower() == 'numpy': - dst_arr = arr.data - elif to.lower() == 'dask': - dst_arr = da.from_array(arr.data) - elif to.lower() == 'dataarray_numpy': - dst_arr = arr - else: # to.lower() == 'dataarray_dask': - dst_arr = xr.DataArray(da.from_array(arr.data), dims=dims) - return dst_arr, 'DataArray_Numpy' - - # DataArray_Dask - elif isinstance(arr, xr.DataArray) and isinstance(arr.data, da.Array): - if to.lower() == 'numpy': - dst_arr = arr.data.compute() - elif to.lower() == 'dask': - dst_arr = arr.data - elif to.lower() == 'dataarray_numpy': - dst_arr = arr.compute() - else: # to.lower() == 'dataarray_dask': - dst_arr = arr - return dst_arr, 'DataArray_Dask' - - else: - raise NotImplementedError - - -def _linspace1D_between_values(arr, num=0): - """Dask-friendly function linearly interpolating values between each 1D array values. - - This function does not perform extrapolation. - It expects a 1D array as input! - - Parameters - ---------- - arr : (np.ndarray, dask.array.Array) - Numpy or Dask Array to be linearly interpolated between values. - num : int, optional - The number of linearly spaced values to infer between array values. - The default is 0. - - Returns - ------- - arr : (np.ndarray, dask.array.Array) - Numpy or Dask Array with in-between linearly interpolated values. 
- - Example - ------- - - Function call: _linspace1D_between_values(arr, num=1) - Input array: - np.array([5.0, 7.0]) - - Output array: - np.array([5.0, 6.0, 7.0]) - """ - import xarray as xr - - # Check input validity - if arr.ndim != 1: - raise ValueError("'_linspace1D_between_values' expects a 1D array.") - num = int(num) - if num < 0: - raise ValueError("'x' and 'y' must be an integer equal or larger than 0.") - if num == 0: - return arr - # Define src and dst ties - dst_N = (arr.size - 1) * (num + 1) + 1 - src_ties = np.arange(dst_N, step=num + 1) - dst_ties = np.arange(dst_N) - # Interpolate - da = xr.DataArray( - data=arr, - dims=("x"), - coords={"x": src_ties} - ) - da_interp = da.interp(x=dst_ties, method="linear") - return da_interp.data - - def _linspace2D_between_values(arr, num_x=0, num_y=0): """Dask-friendly function linearly interpolating values between each 2D array values. @@ -1305,7 +1177,7 @@ def _linspace2D_between_values(arr, num_x=0, num_y=0): Example ------- - Function call: _linspace1D_between_values(arr, num=1) + Function call: _linspace2D_between_values(arr, num_x=1, num_y=1) Input: np.array([[5.0, 7.0], [7.0, 9.0]]) @@ -1344,14 +1216,15 @@ def _linspace2D_between_values(arr, num_x=0, num_y=0): return da_interp.data -def _get_extended_lonlats(lon_start, lat_start, lon_end, lat_end, npts, transpose=True): +def _get_extended_lonlats(lon_start, lat_start, lon_end, lat_end, npts, + ellps="sphere", + transpose=True): """Utils employed by SwathDefinition.extend. It extrapolate npts following the forward azimuth with an interdistance equal to the distance between the starting point and the end point. """ - import pyproj - geod = pyproj.Geod(ellps='sphere') # TODO: sphere or WGS84? + geod = pyproj.Geod(ellps=ellps) # geod = pyproj.Geod(ellps='WGS84') # sphere az12_arr, _, dist_arr = geod.inv(lon_start, lat_start, lon_end, lat_end) list_lat = [] @@ -1900,63 +1773,74 @@ def copy(self, **override_kwargs): return AreaDefinition(**kwargs) def aggregate(self, **dims): - """Return an aggregated version of the area.""" - warnings.warn("'aggregate' is deprecated, use 'downsample' or 'upsample' instead.", PendingDeprecationWarning) - width = int(self.width / dims.get('x', 1)) - height = int(self.height / dims.get('y', 1)) + """Return an aggregated version of the area. + + Aggregate allows to mix between downsample and upsample in different directions. + Example: area_def.aggregate(x=2, y=0.5) <-> area_def.downsample(x=2).upsample(y=2). 
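        A minimal sketch of the resulting size (here area is assumed to be any
        AreaDefinition of 100x100 pixels): area.aggregate(x=2, y=0.5) returns a copy
        with width int(100 / 2) = 50 and height int(100 / 0.5) = 200, over the same
        area extent.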
+ """ + x = dims.get('x', 1) + y = dims.get('y', 1) + if x <= 0 or y <= 0: + raise ValueError('AreaDefinition.aggregate x and y arguments must be > 0.') + if x == 1 and y == 1: + return self + width = int(self.width / x) + height = int(self.height / y) return self.copy(height=height, width=width) def downsample(self, x=1, y=1): """Return a downsampled version of the area.""" # Check input validity - x = int(x) - y = int(y) if x == 1 and y == 1: return self if x < 1 or y < 1: - raise ValueError('x and y arguments must be positive integers larger or equal to 1.') + raise ValueError('AreaDefinition.downsample x and y arguments must be >= 1.') # Downsample - width = int(self.width / x) - height = int(self.height / y) - return self.copy(height=height, width=width) + return self.aggregate(x=x, y=y) def upsample(self, x=1, y=1): """Return an upsampled version of the area.""" # Check input validity - x = int(x) - y = int(y) if x == 1 and y == 1: return self if x < 1 or y < 1: - raise ValueError('x and y arguments must be positive integers larger or equal to 1.') + raise ValueError('AreaDefinition.upsample x and y arguments must be >= 1.') # Upsample - width = int(self.width * x) - height = int(self.height * y) - return self.copy(height=height, width=width) + return self.aggregate(x=1 / x, y=1 / y) + + def extend(self, left=0, right=0, bottom=0, top=0): + """Extend AreaDefinition by n pixels on specific boundary sides. - def extend(self, x=0, y=0): - """Extend AreaDef by x/y pixels on each side.""" + By default, it does not extend on any side. + """ if self.is_geostationary: - raise NotImplementedError("'extend' method is not implemented for GEO AreaDefinition.") + raise NotImplementedError("AreaDefinition.extend method is not implemented for GEO AreaDefinition.") # Check input validity - x = int(x) - y = int(y) - if x < 0 or y < 0: - raise ValueError('x and y arguments must be positive integers.') - if x == 0 and y == 0: + left = int(left) + right = int(right) + bottom = int(bottom) + top = int(top) + if left < 0 or right < 0 or bottom < 0 or top < 0: + raise ValueError('AreaDefinition.extend expects positive numbers of pixels.') + + # Return AreaDefinition if nothing to extend + if left == 0 and right == 0 and bottom == 0 and top == 0: return self + # Retrieve pixel and area info - new_width = self.width + 2 * x - new_height = self.height + 2 * y + new_width = self.width + left + right + new_height = self.height + bottom + top pixel_size_x = self.pixel_size_x pixel_size_y = self.pixel_size_y + # Extend area_extent (lower_left_x, lower_left_y, upper_right_x, upper_right_y) area_extent = self._area_extent new_area_extent = list(area_extent) - new_area_extent[0] = new_area_extent[0] - pixel_size_x * x - new_area_extent[1] = new_area_extent[1] - pixel_size_y * y - new_area_extent[2] = new_area_extent[2] + pixel_size_x * x - new_area_extent[3] = new_area_extent[3] + pixel_size_y * y + new_area_extent[0] = new_area_extent[0] - pixel_size_x * left + new_area_extent[1] = new_area_extent[1] - pixel_size_y * bottom + new_area_extent[2] = new_area_extent[2] + pixel_size_x * right + new_area_extent[3] = new_area_extent[3] + pixel_size_y * top + # Define new AreaDefinition projection = self.crs_wkt area_def = AreaDefinition(self.area_id, self.description, self.proj_id, @@ -1970,49 +1854,38 @@ def extend(self, x=0, y=0): return area_def - def reduce(self, x=0, y=0): - """Reduce AreaDef by x/y pixels on each side.""" + def shrink(self, left=0, right=0, bottom=0, top=0): + """Shrink AreaDefinition by n pixels on 
specific boundary sides. + + By default, it does not shrink on any side. + """ if self.is_geostationary: - raise NotImplementedError("'reduce' method is not implemented for GEO AreaDefinition.") - # Check input validity (ensure reduced area is at least 2x2) - width = self.width - height = self.height - x = int(x) - y = int(y) - if x == 0 and y == 0: + raise NotImplementedError("AreaDefinition.shrink method is not implemented for GEO AreaDefinition.") + # Check input validity + left = int(left) + right = int(right) + bottom = int(bottom) + top = int(top) + if left < 0 or right < 0 or bottom < 0 or top < 0: + raise ValueError('AreaDefinition.shrink expects positive numbers of pixels.') + + # Return AreaDefinition if nothing to extend + if left == 0 and right == 0 and bottom == 0 and top == 0: return self - if x < 0 or y < 0: - raise ValueError('x and y arguments must be positive integers.') - if x >= np.floor(width / 2): - max_x = int(np.floor(width / 2)) - 1 - raise ValueError("You can at maximum reduce width (x) of AreaDef by {} pixels on each side.".format(max_x)) - if y >= np.floor(height / 2): - max_y = int(np.floor(height / 2)) - 1 - raise ValueError("You can at maximum reduce height (y) of AreaDef by {} pixels on each side.".format(max_y)) - # Retrieve pixel and area info - new_width = self.width - 2 * x - new_height = self.height - 2 * y - pixel_size_x = self.pixel_size_x - pixel_size_y = self.pixel_size_y - area_extent = self._area_extent - # Extend area_extent (lower_left_x, lower_left_y, upper_right_x, upper_right_y) - new_area_extent = list(area_extent) - new_area_extent[0] = new_area_extent[0] + pixel_size_x * x - new_area_extent[1] = new_area_extent[1] + pixel_size_y * y - new_area_extent[2] = new_area_extent[2] - pixel_size_x * x - new_area_extent[3] = new_area_extent[3] - pixel_size_y * y - # Define new AreaDefinition - projection = self.crs_wkt - area_def = AreaDefinition(self.area_id, self.description, self.proj_id, - projection=projection, - width=new_width, - height=new_height, - area_extent=new_area_extent, - rotation=self.rotation, - nprocs=self.nprocs, - dtype=self.dtype) - return area_def + # Ensure shrinked area is at least 2x2 + width = self.width + height = self.height + x_max_shrink = width - 2 + y_max_shrink = height - 2 + if (left + right) > x_max_shrink: + raise ValueError("AreaDefinition.shrink can drop maximum {} pixels " + "along the x direction.".format(x_max_shrink)) + if (top + bottom) > y_max_shrink: + raise ValueError("AreaDefinition.shrink can drop maximum {} pixels " + "along the y direction.".format(y_max_shrink)) + + return self[slice(top, height - bottom), slice(left, width - right)] @property def resolution(self): diff --git a/pyresample/test/test_geometry.py b/pyresample/test/test_geometry.py index c842f4f00..14b012abd 100644 --- a/pyresample/test/test_geometry.py +++ b/pyresample/test/test_geometry.py @@ -1997,8 +1997,10 @@ def test_extend(self): import xarray as xr from pyresample.geometry import SwathDefinition - x = 2 - y = 2 + top = 2 + bottom = 2 + left = 2 + right = 2 resolution = 4 lons = np.arange(-179.5, -178.5, 0.5) @@ -2019,9 +2021,9 @@ def test_extend(self): sd_xr = SwathDefinition(lons_xr, lats_xr) sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) - res_np = sd_np.extend(y=y, x=x) - res_xr = sd_xr.extend(y=y, x=x) - res_xr_dask = sd_xr_dask.extend(y=y, x=x) + res_np = sd_np.extend(left, right, bottom, top) + res_xr = sd_xr.extend(left, right, bottom, top) + res_xr_dask = sd_xr_dask.extend(left, right, bottom, top) assert 
isinstance(res_np.lons, np.ndarray) assert isinstance(res_xr.lats, xr.DataArray) @@ -2041,26 +2043,28 @@ def test_extend(self): self.assertAlmostEqual(res_xr.lats.resolution, resolution) # Test skip extension - np.testing.assert_allclose(sd_np.extend(y=0, x=0).lats, sd_np.lats) - np.testing.assert_allclose(sd_np.extend(y=0, x=0).lons, sd_np.lons) + np.testing.assert_allclose(sd_np.extend().lats, sd_np.lats) + np.testing.assert_allclose(sd_np.extend().lons, sd_np.lons) # Test invalid arguments - self.assertRaises(ValueError, sd_np.extend, -1, -1) + self.assertRaises(ValueError, sd_np.extend, -1, -1, 0, 0) # Test works with DataArray also without attrs lats_xr = xr.DataArray(lats, dims=['y', 'x']) lons_xr = xr.DataArray(lons, dims=['y', 'x']) sd_xr1 = SwathDefinition(lons_xr, lats_xr) - res_xr1 = sd_xr1.extend(y=y, x=x) + res_xr1 = sd_xr1.extend(left, right, bottom, top) np.testing.assert_allclose(res_xr1.lats.data, res_xr.lats.data) - def test_reduce(self): - """Test reduce on SwathDefinitions.""" + def test_shrink(self): + """Test shrink on SwathDefinitions.""" import dask.array as da import numpy as np import xarray as xr from pyresample.geometry import SwathDefinition - x = 1 - y = 0 + right = 1 + left = 1 + bottom = 0 + top = 0 resolution = 4 lons = np.arange(-179.5, -177.5, 0.5) @@ -2081,9 +2085,9 @@ def test_reduce(self): sd_xr = SwathDefinition(lons_xr, lats_xr) sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) - res_np = sd_np.reduce(y=y, x=x) - res_xr = sd_xr.reduce(y=y, x=x) - res_xr_dask = sd_xr_dask.reduce(y=y, x=x) + res_np = sd_np.shrink(left, right, bottom, top) + res_xr = sd_xr.shrink(left, right, bottom, top) + res_xr_dask = sd_xr_dask.shrink(left, right, bottom, top) assert isinstance(res_np.lons, np.ndarray) assert isinstance(res_xr.lats, xr.DataArray) @@ -2091,8 +2095,8 @@ def test_reduce(self): assert isinstance(res_xr.lats.data, np.ndarray) assert isinstance(res_xr_dask.lats.data, da.Array) - np.testing.assert_allclose(res_np.lons[:, 0], lons[:, x]) - np.testing.assert_allclose(res_np.lons[:, -1], lons[:, -x - 1]) + np.testing.assert_allclose(res_np.lons[:, 0], lons[:, left]) + np.testing.assert_allclose(res_np.lons[:, -1], lons[:, -right - 1]) np.testing.assert_allclose(res_xr.lons.data, res_np.lons) np.testing.assert_allclose(res_xr.lats.data, res_np.lats) np.testing.assert_allclose(res_xr_dask.lons.values, res_np.lons) @@ -2102,79 +2106,19 @@ def test_reduce(self): self.assertAlmostEqual(res_xr.lats.resolution, resolution) # Test skip reduction - np.testing.assert_allclose(sd_np.reduce(y=0, x=0).lats, sd_np.lats) - np.testing.assert_allclose(sd_np.reduce(y=0, x=0).lons, sd_np.lons) + np.testing.assert_allclose(sd_np.shrink().lats, sd_np.lats) + np.testing.assert_allclose(sd_np.shrink().lons, sd_np.lons) # Test invalid arguments - self.assertRaises(ValueError, sd_np.reduce, -1, -1) + self.assertRaises(ValueError, sd_np.shrink, -1, -1, 0, 0) # Test works with DataArray also without attrs lats_xr = xr.DataArray(lats, dims=['y', 'x']) lons_xr = xr.DataArray(lons, dims=['y', 'x']) sd_xr1 = SwathDefinition(lons_xr, lats_xr) - res_xr1 = sd_xr1.reduce(y=y, x=x) + res_xr1 = sd_xr1.shrink(left, right, bottom, top) np.testing.assert_allclose(res_xr1.lats.data, res_xr.lats.data) # Test it raise Error if x or y are too large and not ensure output to be 2x2 at least - self.assertRaises(ValueError, sd_np.reduce, lons.shape[1] / 2, 0) - self.assertRaises(ValueError, sd_np.reduce, 0, lons.shape[0] / 2) - - def test_latslons_arr_conversion(self): - """Test conversion of 2D 
arrays between various formats.""" - import dask.array as da - import numpy as np - import xarray as xr - - from pyresample.geometry import _convert_2D_array - - # Test all conversion works - lons = np.arange(-179.5, -177.5, 0.5) - lats = np.arange(-89.5, -88.0, 0.5) - lons, lats = np.meshgrid(lons, lats) - lats_dask = da.from_array(lats, chunks=2) - lats_xr = xr.DataArray(lats, dims=['y', 'x']) - lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x']) - valid_format = ['Numpy', 'Dask', 'DataArray_Numpy', 'DataArray_Dask'] - dict_format = {'Numpy': lats, - 'Dask': lats_dask, - 'DataArray_Numpy': lats_xr, - 'DataArray_Dask': lats_xr_dask - } - for in_format in valid_format: - for out_format in valid_format: - out_arr, src_format = _convert_2D_array(dict_format[in_format], to=out_format, dims=None) - assert isinstance(out_arr, type(dict_format[out_format])) - assert src_format.lower() == in_format.lower() - if out_format.lower() == "dataarray_numpy": - assert isinstance(out_arr.data, type(dict_format['Numpy'])) - if out_format.lower() == "dataarray_dask": - assert isinstance(out_arr.data, type(dict_format['Dask'])) - # Test raise errors - self.assertRaises(TypeError, _convert_2D_array, dict_format['Numpy'], ['unvalid_type']) - self.assertRaises(TypeError, _convert_2D_array, [dict_format['Numpy']], 'numpy') - self.assertRaises(ValueError, _convert_2D_array, dict_format['Numpy'], 'unvalid_format') - - def test_linspace1D_between_values(self): - """Test linspace1D_between_values.""" - import dask.array as da - import numpy as np - - from pyresample.geometry import _linspace1D_between_values - arr_np = np.array([5.0, 7.0, 9.0]) - arr_dask = da.from_array(arr_np) - - res_np = _linspace1D_between_values(arr_np, num=1) - res_dask = _linspace1D_between_values(arr_dask, num=1) - - np.testing.assert_allclose(res_np, [5., 6., 7., 8., 9.]) - np.testing.assert_allclose(res_dask, [5., 6., 7., 8., 9.]) - assert isinstance(res_np, np.ndarray) - assert isinstance(res_dask, da.Array) - - # Test for no interpolation inbetween values - res = _linspace1D_between_values(arr_np, num=0) - np.testing.assert_allclose(res, arr_np) - - # Test for valid inputs - self.assertRaises(ValueError, _linspace1D_between_values, arr_np, -1) - self.assertRaises(ValueError, _linspace1D_between_values, np.zeros((2, 2)), 0) + self.assertRaises(ValueError, sd_np.shrink, lons.shape[1] / 2, lons.shape[1] / 2, 0, 0) + self.assertRaises(ValueError, sd_np.shrink, 0, 0, lons.shape[0] / 2, lons.shape[0] / 2) def test_linspace2D_between_values(self): """Test linspace2D_between_values.""" @@ -2898,6 +2842,10 @@ def test_aggregate(self): np.testing.assert_allclose(res.shape, area.shape) assert res == area + # Test raise error + self.assertRaises(ValueError, area.aggregate, x=0, y=0) + self.assertRaises(ValueError, area.aggregate, x=-1, y=-1) + def test_downsample(self): """Test downsampling of AreaDefinitions.""" area = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD', @@ -2926,23 +2874,9 @@ def test_downsample(self): # Test invalid arguments self.assertRaises(ValueError, area.downsample, 0, 0) - self.assertRaises(ValueError, area.downsample, 0, 1) - self.assertRaises(ValueError, area.downsample, 1, 0) self.assertRaises(ValueError, area.downsample, -1, -1) - - # Test raise NotImplementedError for GEO - # area_geo = geometry.AreaDefinition(area_id='seviri', - # description='SEVIRI HRIT like (flipped, south up)', - # proj_id='seviri', - # projection={'proj': 'geos', - # 'lon_0': 0.0, - # 'a': 6378169.00, - # 'b': 6356583.80, - # 
'h': 35785831.00, - # 'units': 'm'}, - # width=123, height=123, - # area_extent=[5500000, 5500000, -5500000, -5500000]) - # self.assertRaises(NotImplementedError, area_geo.downsample, 2, 2) + self.assertRaises(ValueError, area.downsample, 0.5, 1) + self.assertRaises(ValueError, area.downsample, 1, 0.5) def test_upsample(self): """Test upsampling of AreaDefinitions.""" @@ -2973,25 +2907,11 @@ def test_upsample(self): # Test invalid arguments self.assertRaises(ValueError, area.upsample, 0, 0) self.assertRaises(ValueError, area.upsample, 0, 1) - self.assertRaises(ValueError, area.upsample, 1, 0) - self.assertRaises(ValueError, area.upsample, -1, -1) - - # # Test raise NotImplementedError for GEO - # area_geo = geometry.AreaDefinition(area_id='seviri', - # description='SEVIRI HRIT like (flipped, south up)', - # proj_id='seviri', - # projection={'proj': 'geos', - # 'lon_0': 0.0, - # 'a': 6378169.00, - # 'b': 6356583.80, - # 'h': 35785831.00, - # 'units': 'm'}, - # width=123, height=123, - # area_extent=[5500000, 5500000, -5500000, -5500000]) - # self.assertRaises(NotImplementedError, area_geo.upsample, 2, 2) - - def test_reduce(self): - """Test reduction of AreaDefinitions.""" + self.assertRaises(ValueError, area.upsample, 0.5, 1) + self.assertRaises(ValueError, area.upsample, 1, 0.5) + + def test_shrink(self): + """Test shrinkage of AreaDefinitions.""" from pyresample import geometry area = geometry.AreaDefinition('areaD', 'Europe (3km, HRV, VTC)', 'areaD', {'a': '6378144.0', @@ -3006,22 +2926,24 @@ def test_reduce(self): -909968.64000000001, 1029087.28, 1490031.3600000001]) - x = 8 - y = 2 - res = area.reduce(x=x, y=y) + left = 8 + right = 8 + bottom = 2 + top = 2 + res = area.shrink(left, right, bottom, top) self.assertDictEqual(res.proj_dict, area.proj_dict) - self.assertEqual(res.shape[0], area.shape[0] - y * 2) - self.assertEqual(res.shape[1], area.shape[1] - x * 2) + self.assertEqual(res.shape[0], area.shape[0] - bottom - top) + self.assertEqual(res.shape[1], area.shape[1] - left - right) # Test skip reduction - res = area.reduce(x=0, y=0) + res = area.shrink() np.testing.assert_allclose(res.area_extent, area.area_extent) assert res == area # Test invalid arguments - self.assertRaises(ValueError, area.reduce, -1, -1) - self.assertRaises(ValueError, area.reduce, area.shape[0] / 2, 0) - self.assertRaises(ValueError, area.reduce, 0, area.shape[1] / 2) + self.assertRaises(ValueError, area.shrink, -1, -1, -1, -1) + self.assertRaises(ValueError, area.shrink, area.shape[0] / 2, area.shape[0] / 2, 0, 0) + self.assertRaises(ValueError, area.shrink, 0, 0, area.shape[1] / 2, area.shape[1] / 2) # Test raise NotImplementedError for GEO area_geo = geometry.AreaDefinition(area_id='seviri', @@ -3035,7 +2957,7 @@ def test_reduce(self): 'units': 'm'}, width=123, height=123, area_extent=[5500000, 5500000, -5500000, -5500000]) - self.assertRaises(NotImplementedError, area_geo.reduce, 1, 1) + self.assertRaises(NotImplementedError, area_geo.shrink, 1, 1, 0, 0) def test_extend(self): """Test extension of AreaDefinitions.""" @@ -3053,20 +2975,22 @@ def test_extend(self): -909968.64000000001, 1029087.28, 1490031.3600000001]) - x = 8 - y = 2 - res = area.extend(x=x, y=y) + left = 8 + right = 8 + bottom = 2 + top = 2 + res = area.extend(left, right, bottom, top) self.assertDictEqual(res.proj_dict, area.proj_dict) - self.assertEqual(res.shape[0], area.shape[0] + y * 2) - self.assertEqual(res.shape[1], area.shape[1] + x * 2) + self.assertEqual(res.shape[0], area.shape[0] + top + bottom) + 
self.assertEqual(res.shape[1], area.shape[1] + left + right) # Test skip extension - res = area.extend(x=0, y=0) + res = area.extend() np.testing.assert_allclose(res.area_extent, area.area_extent) assert res == area # Test invalid arguments - self.assertRaises(ValueError, area.extend, -1, -1) + self.assertRaises(ValueError, area.extend, -1, -1, -1, -1) # Test raise NotImplementedError for GEO area_geo = geometry.AreaDefinition(area_id='seviri', @@ -3080,7 +3004,97 @@ def test_extend(self): 'units': 'm'}, width=123, height=123, area_extent=[5500000, 5500000, -5500000, -5500000]) - self.assertRaises(NotImplementedError, area_geo.extend, 1, 1) + self.assertRaises(NotImplementedError, area_geo.extend, 1, 1, 0, 0) + + +class TestSwathDefinitionDownsampling(unittest.TestCase): + """Test Downsampling SwathDefinition.""" + + # TODO: Martin + @classmethod + def setUpClass(cls): + """Do some setup for the test class.""" + import dask.array as da + import numpy as np + import xarray as xr + + from pyresample.geometry import SwathDefinition + resolution = 3 + lats = np.array([[0, 0, 0, 0], [1, 1, 1, 1.0]]) + lons = np.array([[178.5, 179.5, -179.5, -178.5], [178.5, 179.5, -179.5, -178.5]]) + lats_dask = da.from_array(lats, chunks=2) + lons_dask = da.from_array(lons, chunks=2) + lats_xr = xr.DataArray(lats, dims=['y', 'x'], + attrs={'resolution': resolution}) + lons_xr = xr.DataArray(lons, dims=['y', 'x'], + attrs={'resolution': resolution}) + lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x'], + attrs={'resolution': resolution}) + lons_xr_dask = xr.DataArray(lons_dask, dims=['y', 'x'], + attrs={'resolution': resolution}) + sd_np = SwathDefinition(lons, lats) + sd_xr = SwathDefinition(lons_xr, lats_xr) + sd_xr_dask = SwathDefinition(lons_xr_dask, lats_xr_dask) + + cls.lons = lons + cls.lats = lats + cls.lons_dask = lons_dask + cls.lats_dask = lats_dask + cls.lons_xr = lons_xr + cls.lats_xr = lons_xr + cls.lons_xr_dask = lons_xr_dask + cls.lats_xr_dask = lats_xr_dask + cls.sd_np = sd_np + cls.sd_xr = sd_xr + cls.sd_xr_dask = sd_xr_dask + cls.resolution = resolution + + def test_downsampling_keeps_arrays(self): + """Test array format is kept.""" + import dask.array as da + import numpy as np + import xarray as xr + + window_size = 2 + res_np = self.sd_np.downsample(y=window_size, x=window_size) + res_xr = self.sd_xr.downsample(y=window_size, x=window_size) + res_xr_dask = self.sd_xr_dask.downsample(y=window_size, x=window_size) + + assert isinstance(res_np.lats, np.ndarray) + assert isinstance(res_xr.lats, xr.DataArray) and isinstance(res_xr.lats.data, np.ndarray) + assert isinstance(res_xr_dask.lats, xr.DataArray) and isinstance(res_xr_dask.lats.data, da.Array) + + def test_downsampling_results_consistency(self): + """Test array format is kept.""" + import numpy as np + window_size = 2 + res_np = self.sd_np.downsample(y=window_size, x=window_size) + res_xr = self.sd_xr.downsample(y=window_size, x=window_size) + res_xr_dask = self.sd_xr_dask.downsample(y=window_size, x=window_size) + + np.testing.assert_allclose(res_np.lons, [[179, -179]]) + np.testing.assert_allclose(res_np.lats, [[0.5, 0.5]], atol=2e-5) + np.testing.assert_allclose(res_xr.lons.data, res_np.lons) + np.testing.assert_allclose(res_xr.lats.data, res_np.lats) + np.testing.assert_allclose(res_xr_dask.lons.values, res_np.lons) + np.testing.assert_allclose(res_xr_dask.lats.values, res_np.lats) + + def test_downsampling_modify_resolution_attrs(self): + window_size = 2 + res_xr = self.sd_xr.downsample(y=window_size, x=window_size) + 
self.assertAlmostEqual(res_xr.lons.resolution, self.resolution * window_size) + self.assertAlmostEqual(res_xr.lats.resolution, self.resolution * window_size) + + def test_downsampling_default_skip(self): + import numpy as np + res_np = self.sd_np.downsample(y=1, x=1) + np.testing.assert_allclose(res_np.lats, self.sd_np.lats) + np.testing.assert_allclose(res_np.lons, self.sd_np.lons) + + def test_downsampling_valid_args(self): + # Test invalid arguments + self.assertRaises(ValueError, self.sd_np.downsample, 0, 0) + self.assertRaises(ValueError, self.sd_np.downsample, -1, -1) def test_enclose_areas(): diff --git a/pyresample/test/test_utils_array.py b/pyresample/test/test_utils_array.py new file mode 100644 index 000000000..a43506f43 --- /dev/null +++ b/pyresample/test/test_utils_array.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pyresample, Resampling of remote sensing image data in python +# +# Copyright (C) 2010-2022 Pyresample developers +# +# This program is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see . +"""Test utils array .""" +import unittest + +import dask.array as da +import numpy as np +import xarray as xr + +VALID_FORMAT = ['Numpy', 'Dask', 'DataArray_Numpy', 'DataArray_Dask'] + +lons_np = np.arange(-179.5, -177.5, 0.5) +lats_np = np.arange(-89.5, -88.0, 0.5) +lons_np, lats_np = np.meshgrid(lons_np, lats_np) +lats_dask = da.from_array(lats_np, chunks=2) +lats_xr = xr.DataArray(lats_np, dims=['y', 'x']) +lats_xr_dask = xr.DataArray(lats_dask, dims=['y', 'x']) +dict_format = {'Numpy': lats_np, + 'Dask': lats_dask, + 'DataArray_Numpy': lats_xr, + 'DataArray_Dask': lats_xr_dask + } + + +class TestArrayConversion(unittest.TestCase): + """Unit testing the array conversion.""" + + def test_numpy_conversion(self): + from pyresample.utils.array import _numpy_conversion + in_format = "Numpy" + dims = None + for out_format in VALID_FORMAT: + out_arr = _numpy_conversion(dict_format[in_format], to=out_format, dims=dims) + assert isinstance(out_arr, type(dict_format[out_format])) + if out_format.lower() == "dataarray_numpy": + assert isinstance(out_arr.data, type(dict_format['Numpy'])) + if out_format.lower() == "dataarray_dask": + assert isinstance(out_arr.data, type(dict_format['Dask'])) + # Test raise errors + self.assertRaises(TypeError, _numpy_conversion, dict_format[in_format], ['unvalid_type']) + self.assertRaises(TypeError, _numpy_conversion, ['unvalid_type'], in_format) + self.assertRaises(ValueError, _numpy_conversion, dict_format[in_format], 'unvalid_format') + + def test_dask_conversion(self): + from pyresample.utils.array import _dask_conversion + in_format = "Dask" + dims = None + for out_format in VALID_FORMAT: + out_arr = _dask_conversion(dict_format[in_format], to=out_format, dims=dims) + assert isinstance(out_arr, type(dict_format[out_format])) + if out_format.lower() == "dataarray_numpy": + assert isinstance(out_arr.data, type(dict_format['Numpy'])) + if out_format.lower() == 
"dataarray_dask": + assert isinstance(out_arr.data, type(dict_format['Dask'])) + # Test raise errors + self.assertRaises(TypeError, _dask_conversion, dict_format[in_format], ['unvalid_type']) + self.assertRaises(TypeError, _dask_conversion, ['unvalid_type'], in_format) + self.assertRaises(ValueError, _dask_conversion, dict_format[in_format], 'unvalid_format') + + def test_xr_numpy_conversion(self): + from pyresample.utils.array import _xr_numpy_conversion + in_format = "DataArray_Numpy" + for out_format in VALID_FORMAT: + out_arr = _xr_numpy_conversion(dict_format[in_format], to=out_format) + assert isinstance(out_arr, type(dict_format[out_format])) + if out_format.lower() == "dataarray_numpy": + assert isinstance(out_arr.data, type(dict_format['Numpy'])) + if out_format.lower() == "dataarray_dask": + assert isinstance(out_arr.data, type(dict_format['Dask'])) + # Test raise errors + self.assertRaises(TypeError, _xr_numpy_conversion, dict_format[in_format], ['unvalid_type']) + self.assertRaises(TypeError, _xr_numpy_conversion, ['unvalid_type'], in_format) + self.assertRaises(ValueError, _xr_numpy_conversion, dict_format[in_format], 'unvalid_format') + + def test_xr_dask_conversion(self): + from pyresample.utils.array import _xr_dask_conversion + in_format = "DataArray_Dask" + for out_format in VALID_FORMAT: + out_arr = _xr_dask_conversion(dict_format[in_format], to=out_format) + assert isinstance(out_arr, type(dict_format[out_format])) + if out_format.lower() == "dataarray_numpy": + assert isinstance(out_arr.data, type(dict_format['Numpy'])) + if out_format.lower() == "dataarray_dask": + assert isinstance(out_arr.data, type(dict_format['Dask'])) + # Test raise errors + self.assertRaises(TypeError, _xr_dask_conversion, dict_format[in_format], ['unvalid_type']) + self.assertRaises(TypeError, _xr_dask_conversion, ['unvalid_type'], in_format) + self.assertRaises(ValueError, _xr_dask_conversion, dict_format[in_format], 'unvalid_format') + + def test_convert_2D_array(self): + """Test conversion of 2D arrays between various formats.""" + from pyresample.utils.array import _convert_2D_array + dims = None + for in_format in VALID_FORMAT: + for out_format in VALID_FORMAT: + out_arr, src_format = _convert_2D_array(dict_format[in_format], to=out_format, dims=dims) + assert isinstance(out_arr, type(dict_format[out_format])) + assert src_format.lower() == in_format.lower() + if out_format.lower() == "dataarray_numpy": + assert isinstance(out_arr.data, type(dict_format['Numpy'])) + if out_format.lower() == "dataarray_dask": + assert isinstance(out_arr.data, type(dict_format['Dask'])) + # Test raise errors + self.assertRaises(TypeError, _convert_2D_array, dict_format['Numpy'], ['unvalid_type']) + self.assertRaises(TypeError, _convert_2D_array, [dict_format['Numpy']], 'numpy') + self.assertRaises(ValueError, _convert_2D_array, dict_format['Numpy'], 'unvalid_format') diff --git a/pyresample/utils/array.py b/pyresample/utils/array.py new file mode 100644 index 000000000..0d91986f5 --- /dev/null +++ b/pyresample/utils/array.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2019-2021 Pyresample developers +# +# This program is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. 
+# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with this program. If not, see . +"""Utilities for converting array.""" + +import dask.array as da +import numpy as np +import xarray as xr + +VALID_FORMAT = ['Numpy', 'Dask', 'DataArray_Numpy', 'DataArray_Dask'] + + +def _numpy_conversion(arr, to, dims=None): + if not isinstance(to, str): + raise TypeError("'to' must be a string indicating the conversion array format.") + if not isinstance(arr, np.ndarray): + raise TypeError("_numpy_conversion expects a np.ndarray as input.") + if not np.isin(to.lower(), np.char.lower(VALID_FORMAT)): + raise ValueError("Valid _numpy_conversion array formats are {}".format(VALID_FORMAT)) + if to.lower() == 'numpy': + dst_arr = arr + elif to.lower() == 'dask': + dst_arr = da.from_array(arr) + elif to.lower() == 'dataarray_numpy': + dst_arr = xr.DataArray(arr, dims=dims) + else: # to.lower() == 'dataarray_dask': + dst_arr = xr.DataArray(da.from_array(arr), dims=dims) + return dst_arr + + +def _dask_conversion(arr, to, dims=None): + if not isinstance(to, str): + raise TypeError("'to' must be a string indicating the conversion array format.") + if not isinstance(arr, da.Array): + raise TypeError("_dask_conversion expects a dask.Array as input.") + if not np.isin(to.lower(), np.char.lower(VALID_FORMAT)): + raise ValueError("Valid _dask_conversion array formats are {}".format(VALID_FORMAT)) + if to.lower() == 'numpy': + dst_arr = arr.compute() + elif to.lower() == 'dask': + dst_arr = arr + elif to.lower() == 'dataarray_numpy': + dst_arr = xr.DataArray(arr.compute(), dims=dims) + else: # to.lower() == 'dataarray_dask': + dst_arr = xr.DataArray(arr, dims=dims) + return dst_arr + + +def _xr_numpy_conversion(arr, to): + if not isinstance(to, str): + raise TypeError("'to' must be a string indicating the conversion array format.") + if not isinstance(arr, xr.DataArray): + raise TypeError("_xr_numpy_conversion expects a xr.DataArray with numpy array as input.") + if not isinstance(arr.data, np.ndarray): + raise TypeError("_xr_numpy_conversion expects a xr.DataArray with numpy array as input.") + if not np.isin(to.lower(), np.char.lower(VALID_FORMAT)): + raise ValueError("Valid _xr_numpy_conversion array formats are {}".format(VALID_FORMAT)) + if to.lower() == 'numpy': + dst_arr = arr.data + elif to.lower() == 'dask': + dst_arr = da.from_array(arr.data) + elif to.lower() == 'dataarray_numpy': + dst_arr = arr + else: # to.lower() == 'dataarray_dask': + dst_arr = xr.DataArray(da.from_array(arr.data), dims=arr.dims) + return dst_arr + + +def _xr_dask_conversion(arr, to): + if not isinstance(to, str): + raise TypeError("'to' must be a string indicating the conversion array format.") + if not isinstance(arr, xr.DataArray): + raise TypeError("_xr_dask_conversion expects a xr.DataArray with dask.Array as input.") + if not isinstance(arr.data, da.Array): + raise TypeError("_xr_dask_conversion expects a xr.DataArray with dask.Array as input.") + if not np.isin(to.lower(), np.char.lower(VALID_FORMAT)): + raise ValueError("Valid _xr_dask_conversion array formats are {}".format(VALID_FORMAT)) + if to.lower() == 'numpy': + dst_arr = arr.data.compute() + elif to.lower() == 'dask': + dst_arr = arr.data + elif to.lower() == 'dataarray_numpy': + 
dst_arr = arr.compute() + else: # to.lower() == 'dataarray_dask': + dst_arr = arr + return dst_arr + + +def _convert_2D_array(arr, to, dims=None): + """ + Convert a 2D array to a specific format. + + Useful to return swath lons, lats in the same original format after processing. + + Parameters + ---------- + arr : (np.ndarray, da.Array, xr.DataArray) + The 2D array to be converted to another array format. + to : TYPE + The desired array output format. + Accepted formats are: ['Numpy','Dask', 'DataArray_Numpy','DataArray_Dask'] + dims : tuple, optional + Optional argument for the specification of xr.DataArray dimension names + if the input array is Numpy or Dask. + Does not have any impact if the input is already a xr.DataArray + Provide a tuple with (y_dimname, x_dimname). + The default is None --> (dim_0, dim_1) + + + Returns + ------- + dst_arr : (np.ndarray, da.Array, xr.DataArray) + The converted 2D array. + src_format: str + The source format of the 2D array. + + """ + # Checks + if not isinstance(to, str): + raise TypeError("'to' must be a string indicating the conversion array format.") + if not np.isin(to.lower(), np.char.lower(VALID_FORMAT)): + raise ValueError("Valid conversion array formats are {}".format(VALID_FORMAT)) + if not isinstance(arr, (np.ndarray, da.Array, xr.DataArray)): + raise TypeError("The provided array must be either a np.ndarray, a dask.Array or a xr.DataArray.") + # Numpy + if isinstance(arr, np.ndarray): + return _numpy_conversion(arr, to=to, dims=dims), "numpy" + # Dask + elif isinstance(arr, da.Array): + return _dask_conversion(arr, to=to, dims=dims), 'dask' + # DataArray_Numpy + elif isinstance(arr, xr.DataArray) and isinstance(arr.data, np.ndarray): + return _xr_numpy_conversion(arr, to=to), 'DataArray_Numpy' + # DataArray_Dask + else: # isinstance(arr, xr.DataArray) and isinstance(arr.data, da.Array): + return _xr_dask_conversion(arr, to=to), 'DataArray_Dask'
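# Example round trip (a hedged sketch; `lats` stands for any 2D numpy array):
#
#     import numpy as np
#     lats = np.zeros((4, 5))
#     arr, src_format = _convert_2D_array(lats, to='DataArray_Dask', dims=('y', 'x'))
#     # arr is an xr.DataArray wrapping a dask array; src_format == 'numpy'
#     back, _ = _convert_2D_array(arr, to=src_format)
#     # back is a plain np.ndarray again, so callers can restore the input format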