diff --git a/CMakeLists.txt b/CMakeLists.txt index 906727f06..3befe53bc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -95,6 +95,7 @@ option (PIO_USE_MPIIO "Enable support for MPI-IO auto detect" ON) option (PIO_USE_MPISERIAL "Enable mpi-serial support (instead of MPI)" OFF) option (PIO_USE_PNETCDF_VARD "Use pnetcdf put_vard " OFF) option (WITH_PNETCDF "Require the use of PnetCDF" ON) +option (PIO_USE_GDAL "Enable support for Geospatial Data Abstraction Library ON") option (BUILD_SHARED_LIBS "Build shared libraries" OFF) if(APPLE) @@ -110,6 +111,14 @@ else() set(USE_VARD 0) endif() +# Set a variable that appears in the config.h.in file. +if(PIO_USE_GDAL) + set(USE_GDAL 1) +else() + set(USE_GDAL 0) +endif() + + # Set a variable that appears in the config.h.in file. if(PIO_ENABLE_LOGGING) set(ENABLE_LOGGING 1) diff --git a/cmake_config.h.in b/cmake_config.h.in index 68b9e8dc4..7463129bc 100644 --- a/cmake_config.h.in +++ b/cmake_config.h.in @@ -46,4 +46,6 @@ /* Does PIO support using pnetcdf for I/O? */ #cmakedefine _PNETCDF +#cmakedefine USE_GDAL + #endif /* _PIO_CONFIG_ */ diff --git a/src/clib/CMakeLists.txt b/src/clib/CMakeLists.txt index 609de6f57..6e1eceee7 100644 --- a/src/clib/CMakeLists.txt +++ b/src/clib/CMakeLists.txt @@ -28,6 +28,10 @@ set (src topology.c pio_file.c pioc_support.c pio_lists.c if (NETCDF_INTEGRATION) set (src ${src} ../ncint/nc_get_vard.c ../ncint/ncintdispatch.c ../ncint/ncint_pio.c ../ncint/nc_put_vard.c) endif () +# This needs to be in an IF statement. using GDAL_INTEGRATION. But haven't figured that out yet. +if (USE_GDAL) + set (src ${src} pio_gdal.c) +endif() add_library (pioc ${src}) @@ -173,6 +177,14 @@ if (PIO_USE_MPISERIAL) endif () endif () +#===== GDAL ===== +if (USE_GDAL) + target_include_directories (pioc + PUBLIC ${GDAL_INCLUDE_DIR}) + target_link_libraries (pioc + PUBLIC "-L${GDAL_LIBRARY_PATH} -lgdal") +endif () + include(CheckTypeSize) check_type_size("size_t" SIZEOF_SIZE_T) CHECK_TYPE_SIZE("long long" SIZEOF_LONG_LONG) diff --git a/src/clib/pio.h b/src/clib/pio.h index a4b20b752..e4bd33ed5 100644 --- a/src/clib/pio.h +++ b/src/clib/pio.h @@ -19,6 +19,8 @@ #include #include +#include + #define NETCDF_VERSION_LE(Maj, Min, Pat) \ (((NC_VERSION_MAJOR == Maj) && (NC_VERSION_MINOR == Min) && (NC_VERSION_PATCH <= Pat)) || \ ((NC_VERSION_MAJOR == Maj) && (NC_VERSION_MINOR < Min)) || (NC_VERSION_MAJOR < Maj)) @@ -606,6 +608,13 @@ typedef struct file_desc_t * feature. One consequence is that PIO_IOTYPE_NETCDF4C files will * not have deflate automatically turned on for each var. */ int ncint_file; + + /** GDAL specific vars - M.Long */ + GDALDatasetH *hDS; + int dateVarID; // Index of field with type OFTDate + int timeVarID; // Index of field with type OFTTime + int datetimeVarID; // Index of field with type OFTDatetime + } file_desc_t; /** @@ -624,7 +633,10 @@ enum PIO_IOTYPE PIO_IOTYPE_NETCDF4C = 3, /** NetCDF4 (HDF5) parallel */ - PIO_IOTYPE_NETCDF4P = 4 + PIO_IOTYPE_NETCDF4P = 4, + + /** GDAL (serial only) */ + PIO_IOTYPE_GDAL = 5 }; /** @@ -1357,6 +1369,15 @@ extern "C" { int nc_put_vard_ulonglong(int ncid, int varid, int decompid, const size_t recnum, const unsigned long long *op); + /* These functions are for the GDAL integration layer. MSL - 9/7/2023 */ + int GDALc_inq_fieldid(int fileid, const char *name, int *varidp); + int GDALc_inq_timeid(int fileid, int *timeid); // Is there a field of type OFTDate, OFTTime, or OFTDateTime? 
+ int GDALc_openfile(int iosysid, int *fileIDp, GDALDatasetH *hDSp, int *iotype, const char *fname, bool mode); + int GDALc_sync(int fileid); + int GDALc_shp_get_int_field(int fileid); + int GDALc_shp_get_float_field(int fileid, int varid, const size_t *startp, const size_t *countp, float *ip); + int GDALc_shp_get_double_field(int fileid, int varid, const size_t *startp, const size_t *countp, double *ip); + #if defined(__cplusplus) } #endif diff --git a/src/clib/pio_darray.c b/src/clib/pio_darray.c index e7274da3e..3bf8abe34 100644 --- a/src/clib/pio_darray.c +++ b/src/clib/pio_darray.c @@ -181,7 +181,7 @@ PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, /* Run these on all tasks if async is not in use, but only on * non-IO tasks if async is in use. */ - if (!ios->async || !ios->ioproc) + if ((!ios->async || !ios->ioproc) && (file->iotype != PIO_IOTYPE_GDAL)) { /* Get the number of dims for this var. */ PLOG((3, "about to call PIOc_inq_varndims varids[0] = %d", varids[0])); @@ -949,6 +949,12 @@ PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, if ((ierr = pio_read_darray_nc(file, iodesc, varid, iobuf))) return pio_err(ios, file, ierr, __FILE__, __LINE__); break; +#ifdef USE_GDAL + case PIO_IOTYPE_GDAL: + if ((ierr = pio_read_darray_shp(file, iodesc, varid, iobuf))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + break; +#endif default: return pio_err(NULL, NULL, PIO_EBADIOTYPE, __FILE__, __LINE__); } diff --git a/src/clib/pio_file.c b/src/clib/pio_file.c index e9edb6af4..328815a55 100644 --- a/src/clib/pio_file.c +++ b/src/clib/pio_file.c @@ -283,6 +283,12 @@ PIOc_closefile(int ncid) ierr = ncmpi_close(file->fh); break; #endif + case PIO_IOTYPE_GDAL: + if (ios->io_rank == 0) { + ierr = GDALClose((void*)file->hDS); + printf("GDALClose ierr: %d\n",ierr); + } + break; default: return pio_err(ios, file, PIO_EBADIOTYPE, __FILE__, __LINE__); } diff --git a/src/clib/pio_gdal.c b/src/clib/pio_gdal.c new file mode 100644 index 000000000..43a31b81d --- /dev/null +++ b/src/clib/pio_gdal.c @@ -0,0 +1,815 @@ +#include +#include +#include +#ifdef USE_GDAL +#include + +/** + * The PIO library maintains its own set of ncids. This is the next + * ncid number that will be assigned. + */ +extern int pio_next_ncid; + +static int +GDALc_inq_file_metadata(file_desc_t *file, GDALDatasetH hDS, int iotype, int *nvars, + int **rec_var, int **pio_type, int **pio_type_size, + MPI_Datatype **mpi_type, int **mpi_type_size, int **ndims) +{ + int mpierr; + int ret; + + /* Check inputs. */ + pioassert(rec_var && pio_type && pio_type_size && mpi_type && mpi_type_size, + "pointers must be provided", __FILE__, __LINE__); + + if (hDS == NULL) { + return pio_err(file->iosystem, file, -999, __FILE__, __LINE__); + } + /* How many vars in the file? */ + if (iotype == PIO_IOTYPE_GDAL) + { + OGRLayerH hLayer = OGR_DS_GetLayer( hDS, 0 ); + OGR_L_ResetReading(hLayer); + + OGRFeatureDefnH hFD = OGR_L_GetLayerDefn(hLayer); + *nvars = OGR_FD_GetFieldCount(hFD); + if (nvars == 0) // empty file + return pio_err(NULL, file, PIO_ENOMEM, __FILE__, __LINE__); + + /* Allocate storage for info about each var. 
*/ + if (*nvars) + { + if (!(*rec_var = malloc(*nvars * sizeof(int)))) + return PIO_ENOMEM; + if (!(*pio_type = malloc(*nvars * sizeof(int)))) + return PIO_ENOMEM; + if (!(*pio_type_size = malloc(*nvars * sizeof(int)))) + return PIO_ENOMEM; + if (!(*mpi_type = malloc(*nvars * sizeof(MPI_Datatype)))) + return PIO_ENOMEM; + if (!(*mpi_type_size = malloc(*nvars * sizeof(int)))) + return PIO_ENOMEM; + if (!(*ndims = malloc(*nvars * sizeof(int)))) + return PIO_ENOMEM; + } + + /* Learn about each variable in the file. */ + for (int v = 0; v < *nvars; v++) + { + int var_ndims; /* Number of dims for this var. */ + nc_type my_type; + + /* Find type of the var and number of dims in this var. Also + * learn about type. */ + size_t type_size; + + // + var_ndims = 1; // FIXED FOR NOW. For data-read purposes, it's a 1D stream across the number of + // elements. + (*ndims)[v] = var_ndims; + //>> if ((ret = nc_inq_var(ncid, v, NULL, &my_type, &var_ndims, NULL, NULL))) + //>> return pio_err(NULL, file, ret, __FILE__, __LINE__); + OGRFieldType Fld = OGR_Fld_GetType(OGR_FD_GetFieldDefn(hFD,v)); + + bool typeOK = true; // assume we're good + switch (Fld) { + case OFTReal: + (*pio_type)[v] = (int)PIO_DOUBLE; + break; + case OFTInteger: + (*pio_type)[v] = (int)PIO_INT; + break; + // This needs to be done. How do we deal with timestamps etc in GDAL vector fields? + //>>case OFTDate: + //>> break; + //>> case OFTTime: + //>> break; + //>> case OFTDate: + //>> break; + //>> case OFTDateTime: + default: + typeOK = false; + break; +// return pio_err(file->iosystem, file, PIO_EBADIOTYPE, __FILE__, __LINE__); + } + //>> if ((ret = nc_inq_type(ncid, (*pio_type)[v], NULL, &type_size))) + //>> return check_netcdf(file, ret, __FILE__, __LINE__); + //>> (*pio_type_size)[v] = type_size; + + if (!typeOK) // Not a usable type + continue; + + /* Get the MPI type corresponding with the PIO type. */ + if ((ret = find_mpi_type((*pio_type)[v], &(*mpi_type)[v], NULL))) + return pio_err(NULL, file, ret, __FILE__, __LINE__); + + /* Get the size of the MPI type. */ + if ((*mpi_type)[v] == MPI_DATATYPE_NULL) + (*mpi_type_size)[v] = 0; + else + if ((mpierr = MPI_Type_size((*mpi_type)[v], &(*mpi_type_size)[v]))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + + } /* next var */ + } /* If PIO_TYPE_GDAL */ + return PIO_NOERR; +} + +/** + * The PIO-C interface for the GDAL function OGR_L_FindFieldIndex() + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. + * + * @param ncid the ncid of the open file, obtained from + * GDALc_openfile() or GDALc_createfile(). + * @param name the field name. + * @param varidp a pointer that will get the variable id + * @return PIO_NOERR for success, error code otherwise. <> + * @ingroup PIO_inq_var_c + * @author Michael Long (adapted from Jim Edwards, Ed Hartnett) + */ +int +GDALc_inq_fieldid(int fileid, const char *name, int *fieldidp) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + /* Get file info based on fileid. */ + if ((ierr = pio_get_file(fileid, &file))) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + ios = file->iosystem; + + /* Caller must provide name. 
*/ + if (!name || strlen(name) > NC_MAX_NAME) + return pio_err(ios, file, PIO_EINVAL, __FILE__, __LINE__); + + PLOG((1, "GDALc_inq_fieldid fileid = %d name = %s", fileid, name)); + + if (ios->async) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ_VARID; + + if (ios->compmain == MPI_ROOT) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&fileid, 1, MPI_INT, ios->compmain, ios->intercomm); + int namelen; + namelen = strlen(name); + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmain, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmain, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(NULL, file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the GDAL function. */ + if (ios->ioproc) + { + GDALDatasetH *hDS = file->hDS; + if (file->do_io) { // We assume that its a GDAL file + OGRLayerH hLayer = OGR_DS_GetLayer( hDS, 0 ); + OGR_L_ResetReading(hLayer); + if (hLayer == NULL) { + printf("Layer is NULL"); + return -1; + } + *fieldidp = OGR_L_FindFieldIndex(hLayer,name,1); + + pioassert(*fieldidp > 0, "variable not found", __FILE__, __LINE__); + + } + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + if (ierr) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results to all tasks. Ignore NULL parameters. */ + if (fieldidp) + if ((mpierr = MPI_Bcast(fieldidp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + + return PIO_NOERR; +} + +int +GDALc_openfile(int iosysid, int *fileIDp, GDALDatasetH *hDSp,int *iotype, const char *filename, bool mode) +//GDALc_openfile(int iosysid, GDALDatasetH *hDSp, int *iotype, const char *filename, bool mode) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int imode; /* Internal mode val for netcdf4 file open. */ + int nvars = 0; + int *rec_var = NULL; + int *pio_type = NULL; + int *pio_type_size = NULL; + MPI_Datatype *mpi_type = NULL; + int *mpi_type_size = NULL; + int *ndims = NULL; + int mpierr = MPI_SUCCESS, mpierr2; /** Return code from MPI function codes. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + GDALDatasetH hDS; + +#ifdef USE_MPE + pio_start_mpe_log(OPEN); +#endif /* USE_MPE */ + + /* Get the IO system info from the iosysid. */ + if (!(ios = pio_get_iosystem_from_id(iosysid))) + return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); + + /* User must provide valid input for these parameters. */ + if (!hDSp || !iotype || !filename) + return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); + if (*iotype != PIO_IOTYPE_GDAL ) + return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); + +// PLOG((2, "PIOc_openfile_retry iosysid = %d iotype = %d filename = %s mode = %d retry = %d", +// iosysid, *iotype, filename, mode, retry)); + + /* Allocate space for the file info. */ + if (!(file = calloc(sizeof(*file), 1))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + + /* Fill in some file values. */ + file->fh = -1; + file->iotype = *iotype; + file->iosystem = ios; + file->writable = (mode & PIO_WRITE) ? 
1 : 0; + + /* Set to true if this task should participate in IO (only true + * for one task with netcdf serial files. */ + if (file->iotype == PIO_IOTYPE_GDAL && ios->io_rank == 0) + file->do_io = 1; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async) + { + int msg = PIO_MSG_OPEN_FILE; + size_t len = strlen(filename); + + if (!ios->ioproc) + { + /* Send the message to the message handler. */ + if (ios->compmain == MPI_ROOT) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + /* Send the parameters of the function call. */ + if (!mpierr) + mpierr = MPI_Bcast(&len, 1, MPI_INT, ios->compmain, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)filename, len + 1, MPI_CHAR, ios->compmain, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&file->iotype, 1, MPI_INT, ios->compmain, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&mode, 1, MPI_INT, ios->compmain, ios->intercomm); +//>> if (!mpierr) +//>> mpierr = MPI_Bcast(&use_ext_ncid, 1, MPI_INT, ios->compmain, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(NULL, file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { + switch (file->iotype) + { + case PIO_IOTYPE_GDAL: + if (ios->io_rank == 0) + { + *hDSp = OGROpen( filename, FALSE, NULL ); + if( hDSp != NULL ) + + ierr = GDALc_inq_file_metadata(file, *hDSp, PIO_IOTYPE_GDAL, + &nvars, &rec_var, &pio_type, + &pio_type_size, &mpi_type, + &mpi_type_size, &ndims); + PLOG((2, "GDALc_openfile:OGROpen for filename = %s mode = %d " + "ierr = %d", filename, mode, ierr)); + } + break; + + default: + return pio_err(ios, file, PIO_EBADIOTYPE, __FILE__, __LINE__); + } + + } + + /* Broadcast and check the return code. */ + if (ios->ioroot == ios->union_rank) + PLOG((2, "Bcasting error code ierr %d ios->ioroot %d ios->my_comm %d", + ierr, ios->ioroot, ios->my_comm)); + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + PLOG((2, "Bcast openfile_retry error code ierr = %d", ierr)); + + /* If there was an error, free allocated memory and deal with the error. */ + if (ierr) + { + free(file); + return PIO_NOERR;// check_netcdf2(ios, NULL, ierr, __FILE__, __LINE__); + } + + /* Broadcast writability to all tasks. */ + if ((mpierr = MPI_Bcast(&file->writable, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + + /* Broadcast some values to all tasks from io root. */ +//>> if (ios->async) +//>> { +//>> PLOG((3, "open bcasting pio_next_ncid %d ios->ioroot %d", pio_next_ncid, ios->ioroot)); +//>> if ((mpierr = MPI_Bcast(&pio_next_ncid, 1, MPI_INT, ios->ioroot, ios->my_comm))) +//>> return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); +//>> } + + if ((mpierr = MPI_Bcast(&nvars, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + + /* Non io tasks need to allocate to store info about variables. 
*/ + if (nvars && !rec_var) + { + if (!(rec_var = malloc(nvars * sizeof(int)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + if (!(pio_type = malloc(nvars * sizeof(int)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + if (!(pio_type_size = malloc(nvars * sizeof(int)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + if (!(mpi_type = malloc(nvars * sizeof(MPI_Datatype)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + if (!(mpi_type_size = malloc(nvars * sizeof(int)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + if (!(ndims = malloc(nvars * sizeof(int)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + } + if (nvars) + { + if ((mpierr = MPI_Bcast(rec_var, nvars, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(pio_type, nvars, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(pio_type_size, nvars, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(mpi_type, nvars*(int)(sizeof(MPI_Datatype)/sizeof(int)), MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(mpi_type_size, nvars, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(ndims, nvars, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + } + + file->hDS = *hDSp; + + /* Create the ncid/fileid that the user will see. This is necessary + * because otherwise ids will be reused if files are opened + * on multiple iosystems. */ + file->pio_ncid = pio_next_ncid++; + *fileIDp=file->pio_ncid; + + /* Add this file to the list of currently open files. */ + pio_add_to_file_list(file); + + /* Add info about the variables to the file_desc_t struct. */ + for (int v = 0; v < nvars; v++) + if ((ierr = add_to_varlist(v, rec_var[v], pio_type[v], pio_type_size[v], + mpi_type[v], mpi_type_size[v], ndims[v], + &file->varlist))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); + file->nvars = nvars; + + /* Free resources. */ + if (nvars) + { + if (rec_var) + free(rec_var); + if (pio_type) + free(pio_type); + if (pio_type_size) + free(pio_type_size); + if (mpi_type) + free(mpi_type); + if (mpi_type_size) + free(mpi_type_size); + if (ndims) + free(ndims); + } + +#ifdef USE_MPE + pio_stop_mpe_log(OPEN, __func__); +#endif /* USE_MPE */ + PLOG((2, "Opened file %s file->pio_ncid = %d file->fh = %d ierr = %d", + filename, file->pio_ncid, file->fh, ierr)); + + return ierr; +} + +int +GDALc_sync(int fileid) { + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ +} + +int GDALc_inq_timeid(int fileid, int *timeid) { // Is there a field of type OFTDate, OFTTime, or OFTDateTime? + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + /* Get file info based on ncid. 
*/ + if ((ierr = pio_get_file(fileid, &file))) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + ios = file->iosystem; + if (file->hDS == NULL) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + + GDALDatasetH *hDS = file->hDS; + OGRLayerH hLayer = OGR_DS_GetLayer( hDS, 0 ); + if (hLayer == NULL) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + OGR_L_ResetReading(hLayer); + + OGRFeatureDefnH hFD = OGR_L_GetLayerDefn(hLayer); + if (hFD == NULL) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + int nFld = OGR_FD_GetFieldCount(hFD); + OGRFieldDefnH hTMP = OGR_FD_GetFieldDefn(hFD,1); + hTMP = OGR_FD_GetFieldDefn(hFD,0); + + for (int i=0;i<(file->nvars)-1;i++) { + OGRFieldDefnH hFlD = OGR_FD_GetFieldDefn(hFD,i); + OGRFieldType Fld = OGR_Fld_GetType(hFlD); + } + +// OGR_FD_Destroy(hFD); + + return 0; + } + +/** + * Read an array of data from a file to the (serial) IO library. This + * function is only used with netCDF classic and netCDF-4 serial + * iotypes. + * + * @param file a pointer to the open file descriptor for the file + * that will be written to + * @param iodesc a pointer to the defined iodescriptor for the buffer + * @param vid the variable id to be read. + * @param iobuf the buffer to be written from this mpi task. May be + * null. for example we have 8 ionodes and a distributed array with + * global size 4, then at least 4 nodes will have a null iobuf. In + * practice the box rearranger trys to have at least blocksize bytes + * on each io task and so if the total number of bytes to write is + * less than blocksize * numiotasks then some iotasks will have a NULL + * iobuf. + * @returns 0 for success, error code otherwise. + * @ingroup PIO_read_darray_c + * @author Jim Edwards, Ed Hartnett + */ +int +pio_read_darray_shp(file_desc_t *file, io_desc_t *iodesc, int vid, + void *iobuf) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + var_desc_t *vdesc; /* Information about the variable. */ + int ndims; /* Number of dims in decomposition. */ + int fndims; /* Number of dims for this var in file. */ + MPI_Status status; + int mpierr; /* Return code from MPI functions. */ + int ierr; + + /* Check inputs. */ + pioassert(file && file->iosystem && iodesc && vid >= 0 && vid <= PIO_MAX_VARS, + "invalid input", __FILE__, __LINE__); + + PLOG((2, "pio_read_darray_shp vid = %d", vid)); + ios = file->iosystem; + +#ifdef TIMING + /* Start timer if desired. */ + if ((ierr = pio_start_timer("PIO:read_darray_shp"))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); +#endif /* TIMING */ + + /* Get var info for this var. */ + if ((ierr = get_var_desc(vid, &file->varlist, &vdesc))) + return pio_err(NULL, file, ierr, __FILE__, __LINE__); + + /* Get the number of dims in our decomposition. */ + ndims = iodesc->ndims; + + /* Get number of dims for this var. */ + fndims = vdesc->ndims; + + /* If setframe was not called, use a default value of 0. This is + * required for backward compatibility. */ + if (fndims == ndims + 1 && vdesc->record < 0) + vdesc->record = 0; + PLOG((3, "fndims %d ndims %d vdesc->record %d vdesc->ndims %d", fndims, + ndims, vdesc->record, vdesc->ndims)); + + /* Confirm that we are being called with the correct ndims. 
*/ + pioassert((fndims == ndims && vdesc->record < 0) || + (fndims == ndims + 1 && vdesc->record >= 0), + "unexpected record", __FILE__, __LINE__); + + if (ios->ioproc) + { + io_region *region; + size_t start[fndims]; + size_t count[fndims]; + size_t tmp_start[fndims * iodesc->maxregions]; + size_t tmp_count[fndims * iodesc->maxregions]; + size_t tmp_bufsize; + void *bufptr; + + /* buffer is incremented by byte and loffset is in terms of + the iodessc->mpitype so we need to multiply by the size of + the mpitype. */ + region = iodesc->firstregion; + + /* If setframe was not set before this call, assume a value of + * 0. This is required for backward compatibility. */ + if (fndims > ndims) + if (vdesc->record < 0) + vdesc->record = 0; + + /* Put together start/count arrays for all regions. */ + for (int regioncnt = 0; regioncnt < iodesc->maxregions; regioncnt++) + { + if (!region || iodesc->llen == 0) + { + /* Nothing to write for this region. */ + for (int i = 0; i < fndims; i++) + { + tmp_start[i + regioncnt * fndims] = 0; + tmp_count[i + regioncnt * fndims] = 0; + } + bufptr = NULL; + } + else + { + if (vdesc->record >= 0 && fndims > 1) + { + /* This is a record var. Find start for record dims. */ + tmp_start[regioncnt * fndims] = vdesc->record; + + /* Find start/count for all non-record dims. */ + for (int i = 1; i < fndims; i++) + { + tmp_start[i + regioncnt * fndims] = region->start[i - 1]; + tmp_count[i + regioncnt * fndims] = region->count[i - 1]; + } + + /* Set count for record dimension. */ + if (tmp_count[1 + regioncnt * fndims] > 0) + tmp_count[regioncnt * fndims] = 1; + } + else + { + /* Non-time dependent array */ + for (int i = 0; i < fndims; i++) + { + tmp_start[i + regioncnt * fndims] = region->start[i]; + tmp_count[i + regioncnt * fndims] = region->count[i]; + } + } + } + +#if PIO_ENABLE_LOGGING + /* Log arrays for debug purposes. */ + PLOG((3, "region = %d", region)); + for (int i = 0; i < fndims; i++) + PLOG((3, "tmp_start[%d] = %d tmp_count[%d] = %d", i + regioncnt * fndims, tmp_start[i + regioncnt * fndims], + i + regioncnt * fndims, tmp_count[i + regioncnt * fndims])); +#endif /* PIO_ENABLE_LOGGING */ + + /* Move to next region. */ + if (region) + region = region->next; + } /* next regioncnt */ + + /* IO tasks other than 0 send their starts/counts and data to + * IO task 0. 
*/ + if (ios->io_rank > 0) + { + if ((mpierr = MPI_Send(&iodesc->llen, 1, MPI_OFFSET, 0, ios->io_rank, ios->io_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + PLOG((3, "sent iodesc->llen = %d", iodesc->llen)); + + if (iodesc->llen > 0) + { + if ((mpierr = MPI_Send(&(iodesc->maxregions), 1, MPI_INT, 0, + ios->num_iotasks + ios->io_rank, ios->io_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Send(tmp_count, iodesc->maxregions * fndims, MPI_OFFSET, 0, + 2 * ios->num_iotasks + ios->io_rank, ios->io_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Send(tmp_start, iodesc->maxregions * fndims, MPI_OFFSET, 0, + 3 * ios->num_iotasks + ios->io_rank, ios->io_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + PLOG((3, "sent iodesc->maxregions = %d tmp_count and tmp_start arrays", iodesc->maxregions)); + + if ((mpierr = MPI_Recv(iobuf, iodesc->llen, iodesc->mpitype, 0, + 4 * ios->num_iotasks + ios->io_rank, ios->io_comm, &status))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + PLOG((3, "received %d elements of data", iodesc->llen)); + } + } + else if (ios->io_rank == 0) + { + /* This is IO task 0. Get starts/counts and data from + * other IO tasks. */ + int maxregions = 0; + size_t loffset, regionsize; + size_t this_start[fndims * iodesc->maxregions]; + size_t this_count[fndims * iodesc->maxregions]; + + for (int rtask = 1; rtask <= ios->num_iotasks; rtask++) + { + if (rtask < ios->num_iotasks) + { + if ((mpierr = MPI_Recv(&tmp_bufsize, 1, MPI_OFFSET, rtask, rtask, ios->io_comm, &status))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + PLOG((3, "received tmp_bufsize = %d", tmp_bufsize)); + + if (tmp_bufsize > 0) + { + if ((mpierr = MPI_Recv(&maxregions, 1, MPI_INT, rtask, ios->num_iotasks + rtask, + ios->io_comm, &status))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Recv(this_count, maxregions * fndims, MPI_OFFSET, rtask, + 2 * ios->num_iotasks + rtask, ios->io_comm, &status))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Recv(this_start, maxregions * fndims, MPI_OFFSET, rtask, + 3 * ios->num_iotasks + rtask, ios->io_comm, &status))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + PLOG((3, "received maxregions = %d this_count, this_start arrays ", maxregions)); + } + } + else + { + maxregions = iodesc->maxregions; + tmp_bufsize = iodesc->llen; + } + PLOG((3, "maxregions = %d tmp_bufsize = %d", maxregions, tmp_bufsize)); + + /* Now get each region of data. */ + loffset = 0; + for (int regioncnt = 0; regioncnt < maxregions; regioncnt++) + { + /* Get pointer where data should go. */ + bufptr = (void *)((char *)iobuf + iodesc->mpitype_size * loffset); + regionsize = 1; + + /* ??? */ + if (rtask < ios->num_iotasks) + { + for (int m = 0; m < fndims; m++) + { + start[m] = this_start[m + regioncnt * fndims]; + count[m] = this_count[m + regioncnt * fndims]; + regionsize *= count[m]; + } + } + else + { + for (int m = 0; m < fndims; m++) + { + start[m] = tmp_start[m + regioncnt * fndims]; + count[m] = tmp_count[m + regioncnt * fndims]; + regionsize *= count[m]; + } + } + loffset += regionsize; + + /* Read the data. 
*/ + /* ierr = nc_get_vara(file->fh, vid, start, count, bufptr); */ + switch (iodesc->piotype) + { + case PIO_BYTE: + return pio_err(ios, file, PIO_EBADTYPE, __FILE__, __LINE__); + case PIO_CHAR: + return pio_err(ios, file, PIO_EBADTYPE, __FILE__, __LINE__); + case PIO_SHORT: + return pio_err(ios, file, PIO_EBADTYPE, __FILE__, __LINE__); + case PIO_INT: + ierr = GDALc_shp_get_int_field(file->pio_ncid); + break; + case PIO_FLOAT: + ierr = GDALc_shp_get_float_field(file->pio_ncid, vid, start, count, (float *)bufptr); + break; + case PIO_DOUBLE: + ierr = GDALc_shp_get_double_field(file->pio_ncid, vid, start, count, (double *)bufptr); + break; + default: + return pio_err(ios, file, PIO_EBADTYPE, __FILE__, __LINE__); + } + + /* Check error code of netCDF call. */ + if (ierr) + return check_netcdf(file, ierr, __FILE__, __LINE__); + } + + /* The decomposition may not use all of the active io + * tasks. rtask here is the io task rank and + * ios->num_iotasks is the number of iotasks actually + * used in this decomposition. */ + if (rtask < ios->num_iotasks && tmp_bufsize > 0){ + if ((mpierr = MPI_Send(iobuf, tmp_bufsize, iodesc->mpitype, rtask, + 4 * ios->num_iotasks + rtask, ios->io_comm))) + return check_mpi(NULL, file, mpierr, __FILE__, __LINE__); + } + } + } + } + +#ifdef TIMING + if ((ierr = pio_stop_timer("PIO:read_darray_nc_serial"))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); +#endif /* TIMING */ + + PLOG((2, "pio_read_darray_shp complete ierr %d", ierr)); + return PIO_NOERR; +} + +int +GDALc_shp_get_int_field(int fileid) +{ + return PIO_NOERR; +} +int +GDALc_shp_get_double_field(int fileid, int varid, const size_t *startp, + const size_t *countp, double *ip) +{ + OGRFeatureH hF; + file_desc_t *file; /* Pointer to file information. */ + int ierr; + + /* Get file info based on fileid. */ + if ((ierr = pio_get_file(fileid, &file))) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + if (file->hDS == NULL) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + + OGRLayerH hL = OGR_DS_GetLayer( file->hDS, 0 ); + + // here, we have to assume start and count are only one dimension, and have + // only one assigned value. + for (size_t i = startp[0]; i> ip[%d]=%f\n",i,ip[i]); + } + + return PIO_NOERR; +} +GDALc_shp_get_float_field(int fileid, int varid, const size_t *startp, + const size_t *countp, float *ip) +{ + OGRFeatureH hF; + file_desc_t *file; /* Pointer to file information. */ + int ierr; + + /* Get file info based on fileid. */ + if ((ierr = pio_get_file(fileid, &file))) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + if (file->hDS == NULL) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + + OGRLayerH hL = OGR_DS_GetLayer( file->hDS, 0 ); + + // here, we have to assume start and count are only one dimension, and have + // only one assigned value. 
+ for (size_t i = startp[0]; i diff --git a/tests/cunit/data/cb_2018_us_region_20m.shp.ea.iso.xml b/tests/cunit/data/cb_2018_us_region_20m.shp.ea.iso.xml new file mode 100755 index 000000000..544a79876 --- /dev/null +++ b/tests/cunit/data/cb_2018_us_region_20m.shp.ea.iso.xml @@ -0,0 +1,254 @@ + + + + Feature Catalog for the 2018 United States 1:20,000,000 Cartographic Boundary File + + + The Region at a scale of 1:20,000,000 + + + cb_2018_region_20m + + + 2019-05 + + + eng + + + utf8 + + + + + + + cb_2018_us_region_20m.shp + + + Current Region (national) + + + false + + + + + + REGIONCE + + + Current Census region code + + + + + + + 1 + + + Northeast + + + + + + + + 2 + + + Midwest + + + + + + + + 3 + + + South + + + + + + + + 4 + + + West + + + + + + + + + + AFFGEOID + + + American FactFinder summary level code + geovariant code + '00US' + GEOID + + + + + + + American FactFinder geographic identifier + + + + + + + + + + GEOID + + + Region code identifier, Census Region code + + + + + + + + Region code code found in REGIONCE + + + + + + + + + NAME + + + Current region name + + + + + + + Census geographic region names + + + + + + + + + + LSAD + + + Current legal/statistical area description code for region + + + + + + + 68 + + + Region (suffix) + + + + + + + + + + ALAND + + + Current land area (square meters) + + + + + + + + + + + + + + Range Domain Minimum: 0 + Range Domain Maximum: 9,999,999,999,999 + + + + + + + + + AWATER + + + Current water area (square meters) + + + + + + + + + + + + + + Range Domain Minimum: 0 + Range Domain Maximum: 9,999,999,999,999 + + + + + + + + \ No newline at end of file diff --git a/tests/cunit/data/cb_2018_us_region_20m.shp.iso.gfs b/tests/cunit/data/cb_2018_us_region_20m.shp.iso.gfs new file mode 100644 index 000000000..2542326b9 --- /dev/null +++ b/tests/cunit/data/cb_2018_us_region_20m.shp.iso.gfs @@ -0,0 +1 @@ + diff --git a/tests/cunit/data/cb_2018_us_region_20m.shp.iso.xml b/tests/cunit/data/cb_2018_us_region_20m.shp.iso.xml new file mode 100755 index 000000000..fda6aefe1 --- /dev/null +++ b/tests/cunit/data/cb_2018_us_region_20m.shp.iso.xml @@ -0,0 +1,531 @@ + + + + cb_2018_us_region_20m.shp.iso.xml + + + eng + + + UTF-8 + + + +dataset + + + + + 2019-05 + + + ISO 19115 Geographic Information - Metadata + + + 2009-02-15 + + + https://www2.census.gov/geo/tiger/GENZ2018/shp/cb_2018_us_region_20m.zip + + + + + + + + + complex + + + 4 + + + + + + + + + + + + + INCITS (formerly FIPS) codes + + + + + + + + + + + + + 2018 Cartographic Boundary File, Region for United States, 1:20,000,000 + + + + + + 2019-05 + + + publication + + + + + + + + + + + + The 2018 cartographic boundary shapefiles are simplified representations of selected geographic areas from the U.S. Census Bureau's Master Address File / Topologically Integrated Geographic Encoding and Referencing (MAF/TIGER) Database (MTDB). These boundary files are specifically designed for small-scale thematic mapping. When possible, generalization is performed with the intent to maintain the hierarchical relationships among geographies and to maintain the alignment of geographies within a file set for a given year. Geographic areas may not align with the same areas from another year. Some geographies are available as nation-based files while others are available only as state-based files. + +Regions are four groupings of states (Northeast, South, Midwest, and West) established by the Census Bureau in 1942 for the presentation of census data. 
+ + + These files were specifically created to support small-scale thematic mapping. To improve the appearance of shapes at small scales, areas are represented with fewer vertices than detailed TIGER/Line Shapefiles. Cartographic boundary files take up less disk space than their ungeneralized counterparts. Cartographic boundary files take less time to render on screen than TIGER/Line Shapefiles. You can join this file with table data downloaded from American FactFinder by using the AFFGEOID field in the cartographic boundary file. If detailed boundaries are required, please use the TIGER/Line Shapefiles instead of the generalized cartographic boundary files. + + + + completed + + + + + + + + notPlanned + + + + + + + + Boundaries + + + theme + + + + + ISO 19115 Topic Categories + + + + + + + + + + 2018 + + + SHP + + + Cartographic Boundary + + + Generalized + + + Region + + + theme + + + + + None + + + + + + + + + + United States + + + US + + + place + + + + + ISO 3166 Codes for the representation of names of countries and their subdivisions + + + + + + + + + + otherRestrictions + + + + + + Access Constraints: None + + + Use Constraints:The intended display scale for this file is 1:20,000,000. This file should not be displayed at scales larger than 1:20,000,000. + +These products are free to use in a product or publication, however acknowledgement must be given to the U.S. Census Bureau as the source. The boundary information is for visual display at appropriate small scales only. Cartographic boundary files should not be used for geographic analysis including area or perimeter calculation. Files should not be used for geocoding addresses. Files should not be used for determining precise geographic area relationships. + + + + + + + vector + + + + + + + 20000000 + + + + + + + eng + + + + + + boundaries + + + The cartographic boundary files contain geographic data only and do not include display mapping software or statistical data. For information on how to use cartographic boundary file data with specific software package users shall contact the company that produced the software. + + + + + + + -179.174265 + + + 179.773922 + + + 17.913769 + + + 71.352561 + + + + + + + + publication date + 2019-05 + 2019-05 + + + + + + + + + + + + + true + + + + + Feature Catalog for the 2018 Region 1:20,000,000 Cartographic Boundary File + + + + + + + + + + + https://meta.geo.census.gov/data/existing/decennial/GEO/CPMB/boundary/2018cb/region_20m/2018_region_20m.ea.iso.xml + + + + + + + + + + + SHP + + + + PK-ZIP, version 1.93A or higher + + + + + + + HTML + + + + + + + + + + + The online cartographic boundary files may be downloaded without charge. + + + To obtain more information about ordering Cartographic Boundary Files visit https://www.census.gov/geo/www/tiger. + + + + + + + + + + + https://www2.census.gov/geo/tiger/GENZ2018/shp/cb_2018_us_region_20m.zip + + + Shapefile Zip File + + + + + + + + + + + https://www.census.gov/geo/maps-data/data/tiger-cart-boundary.html + + + Cartographic Boundary Shapefiles + + + Simplified representations of selected geographic areas from the Census Bureau's MAF/TIGER geographic database + + + + + + + + + + + + + dataset + + + + + + + Horizontal Positional Accuracy + + + + + + Data are not accurate. Data are generalized representations of geographic boundaries at 1:20,000,000. + + + + + + meters + + + + + Missing + + + + + + + + + The cartographic boundary files are generalized representations of extracts taken from the MAF/TIGER Database. 
Generalized boundary files are clipped to a simplified version of the U.S. outline. As a result, some off-shore areas may be excluded from the generalized files. Some small geographic areas, holes, or discontiguous parts of areas may not be included in generalized files if they are not visible at the target scale. + + + + + + + + The cartographic boundary files are generalized representations of extracts taken from the MAF/TIGER Database. Generalized boundary files are clipped to a simplified version of the U.S. outline. As a result, some off-shore areas may be excluded from the generalized files. Some small geographic areas, holes, or discontiguous parts of areas may not be included in generalized files if they are not visible at the target scale. + + + + + + + + The Census Bureau performed automated tests to ensure logical consistency of the source database. Segments making up the outer and inner boundaries of a polygon tie end-to-end to completely enclose the area. All polygons were tested for closure. The Census Bureau uses its internally developed geographic update system to enhance and modify spatial and attribute data in the Census MAF/TIGER database. Standard geographic codes, such as INCITS (formerly FIPS) codes for states, counties, municipalities, county subdivisions, places, American Indian/Alaska Native/Native Hawaiian areas, and congressional districts are used when encoding spatial entities. The Census Bureau performed spatial data tests for logical consistency of the codes during the compilation of the original Census MAF/TIGER database files. Feature attribute information has been examined but has not been fully tested for consistency. + +For the cartographic boundary files, the Point and Vector Object Count for the G-polygon SDTS Point and Vector Object Type reflects the number of records in the file's data table. For multi-polygon features, only one attribute record exists for each multi-polygon rather than one attribute record per individual G-polygon component of the multi-polygon feature. Cartographic Boundary File multi-polygons are an exception to the G-polygon object type classification. Therefore, when multi-polygons exist in a file, the object count will be less than the actual number of G-polygons. + + + + + + + + + + Spatial data were extracted from the MAF/TIGER database and processed through a U.S. Census Bureau batch generalization system. + + + 2019-05-01T00:00:00 + + + + + Geo-spatial Relational Database + + + + + Census MAF/TIGER database + + + MAF/TIGER + + + + + + U.S. Department of Commerce, U.S. Census Bureau, Geography Division, Geographic Customer Services Branch + + + originator + + + + + + Source Contribution: All spatial and feature data + + + + + + + + + + 201706 + 201805 + + + + + + + + + + + + + + + + + + + notPlanned + + + + This was transformed from the Census Metadata Import Format + + + + + \ No newline at end of file diff --git a/tests/cunit/data/cb_2018_us_region_20m.shx b/tests/cunit/data/cb_2018_us_region_20m.shx new file mode 100644 index 000000000..9a8dbd437 Binary files /dev/null and b/tests/cunit/data/cb_2018_us_region_20m.shx differ diff --git a/tests/cunit/test_gdal.c b/tests/cunit/test_gdal.c new file mode 100644 index 000000000..6ccb6e194 --- /dev/null +++ b/tests/cunit/test_gdal.c @@ -0,0 +1,311 @@ +/* + * Tests for PIO distributed arrays. + * + * @author Ed Hartnett + * @date 2/16/17 + */ +#include +#include +#include +#include + +/* The number of tasks this test should run on. 
*/ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 4 + +/* The name of this test. */ +#define TEST_NAME "test_gdal" + +/* Number of processors that will do IO. */ +#define NUM_IO_PROCS 1 + +/* Number of computational components to create. */ +#define COMPONENT_COUNT 1 + +/* The number of dimensions in the example data. In this test, we + * are using three-dimensional data. */ +#define NDIM 1 + +/* But sometimes we need arrays of the non-record dimensions. */ +#define NDIM2 2 + +/* The length of our sample data along each dimension. */ +#define X_DIM_LEN 4 +#define Y_DIM_LEN 4 + +/* The number of timesteps of data to write. */ +#define NUM_TIMESTEPS 2 + +/* The names of variables in the netCDF output files. */ +#define VAR_NAME "Billy-Bob" +#define VAR_NAME2 "Sally-Sue" + +/* Test cases relating to PIOc_write_darray_multi(). */ +#define NUM_TEST_CASES_WRT_MULTI 3 + +/* Test with and without specifying a fill value to + * PIOc_write_darray(). */ +#define NUM_TEST_CASES_FILLVALUE 2 + +/* The dimension names. */ +//char dim_name[NDIM][PIO_MAX_NAME + 1] = {"timestep", "x", "y"}; + +/* Length of the dimensions in the sample data. */ +//int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; + +/* Create a 1D decomposition. + * + * @param ntasks the number of available tasks + * @param my_rank rank of this task. + * @param iosysid the IO system ID. + * @param dim_len an array of length 3 with the dimension sizes. + * @param ioid a pointer that gets the ID of this decomposition. + * @param pio_type the type that will be used for basetype. + * @returns 0 for success, error code otherwise. + **/ +int create_decomposition_1d(int ntasks, int my_rank, int iosysid, int *ioid, int pio_type) +{ + PIO_Offset elements_per_pe; /* Array elements per processing unit. */ + int dim_len_1d[NDIM] = {X_DIM_LEN}; + int ret; + + /* How many data elements per task? In this example we will end up + * with 2. */ + elements_per_pe = X_DIM_LEN / ntasks; + + PIO_Offset compdof[elements_per_pe]; + + /* Don't forget to add 1! */ + compdof[0] = my_rank + 1; + + /* This means fill value will be used here. */ + compdof[1] = 0; + + /* Create the PIO decomposition for this test. */ + if ((ret = PIOc_InitDecomp(iosysid, pio_type, NDIM, dim_len_1d, elements_per_pe, + compdof, ioid, NULL, NULL, NULL))) + ERR(ret); + + return 0; +} + +/** + * Test the darray functionality. Create a netCDF file with 3 + * dimensions and 1 PIO_INT variable, and use darray to write some + * data. + * + * @param iosysid the IO system ID. + * @param ioid the ID of the decomposition. + * @param num_flavors the number of IOTYPES available in this build. + * @param flavor array of available iotypes. + * @param my_rank rank of this task. + * @param pio_type the type of the data. + * @returns 0 for success, error code otherwise. + */ +int test_gdal(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, + int pio_type) +{ + char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */ + int dimids[NDIM]; /* The dimension IDs. */ + int ncid; /* The ncid of the netCDF file. */ + int ncid2; /* The ncid of the re-opened netCDF file. */ + int varid; /* The ID of the netCDF varable. */ + int varid2; /* The ID of a netCDF varable of different type. */ + int wrong_varid = TEST_VAL_42; /* A wrong ID. */ + int ret; /* Return code. 
*/ + MPI_Datatype mpi_type; + int type_size; /* size of a variable of type pio_type */ + int other_type; /* another variable of the same size but different type */ + PIO_Offset arraylen = 4; + void *fillvalue, *ofillvalue; + void *test_data; + void *test_data_in; + int fillvalue_int = NC_FILL_INT; + int test_data_int[arraylen]; + int test_data_int_in[arraylen]; + float fillvalue_float = NC_FILL_FLOAT; + float test_data_float[arraylen]; + float test_data_float_in[arraylen]; + double fillvalue_double = NC_FILL_DOUBLE; + double test_data_double[arraylen]; + double test_data_double_in[arraylen]; + int iotype = PIO_IOTYPE_GDAL; + + GDALDatasetH hDSp; + + /* Initialize some data. */ + for (int f = 0; f < arraylen; f++) + { + test_data_int[f] = my_rank * 10 + f; + test_data_float[f] = my_rank * 10 + f + 0.5; + test_data_double[f] = my_rank * 100000 + f + 0.5; + } + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (int fmt = 0; fmt < num_flavors; fmt++) + { + + /* Add a couple of extra tests for the + * PIOc_write_darray_multi() function. */ + for (int test_multi = 0; test_multi < NUM_TEST_CASES_WRT_MULTI; test_multi++) + { + sprintf(filename, "data/cb_2018_us_region_20m.shp"); + + test_data_in = test_data_float_in; + /* Open the file. */ + if ((ret = GDALc_openfile(iosysid, &ncid2, &hDSp, &iotype, filename, PIO_NOWRITE))) + ERR(ret); + + if ((ret = GDALc_inq_fieldid(ncid2, "GEOID", &varid))) + ERR(ret); + + /* Read the data. */ + if ((ret = PIOc_read_darray(ncid2, varid, ioid, arraylen, test_data_in))) + ERR(ret); + + /* Check the results. */ + for (int f = 0; f < arraylen; f++) + { + switch (pio_type) + { + case PIO_INT: + if (test_data_int_in[f] != test_data_int[f]) + return ERR_WRONG; + break; + case PIO_FLOAT: + if (test_data_float_in[f] != test_data_float[f]) + return ERR_WRONG; + break; + case PIO_DOUBLE: + if (test_data_double_in[f] != test_data_double[f]) + return ERR_WRONG; + break; + default: + ERR(ERR_WRONG); + } + } + + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid2))) + ERR(ret); + } /* next test multi */ + } /* next iotype */ + + return PIO_NOERR; +} + +/** + * Run all the tests. + * + * @param iosysid the IO system ID. + * @param num_flavors number of available iotypes in the build. + * @param flavor pointer to array of the available iotypes. + * @param my_rank rank of this task. + * @param test_comm the communicator the test is running on. + * @returns 0 for success, error code otherwise. + */ +int test_all_gdal(int iosysid, int num_flavors, int *flavor, int my_rank, + MPI_Comm test_comm) +{ +#define NUM_TYPES_TO_TEST 1 + int ioid; + char filename[PIO_MAX_NAME + 1]; + int pio_type[NUM_TYPES_TO_TEST] = {PIO_DOUBLE}; + int dim_len_1d[NDIM] = {X_DIM_LEN};//, Y_DIM_LEN}; + int ret; /* Return code. */ + + for (int t = 0; t < NUM_TYPES_TO_TEST; t++) + { +// /* This will be our file name for writing out decompositions. */ +// sprintf(filename, "%s_decomp_rank_%d_flavor_%d_type_%d.nc", TEST_NAME, my_rank, +// *flavor, pio_type[t]); + + /* Decompose the data over the tasks. */ + if ((ret = create_decomposition_1d(TARGET_NTASKS, my_rank, iosysid, + &ioid, pio_type[t]))) + return ret; + + printf("my_rank %d iosysid %d ioid %d ret %d\n",my_rank, iosysid, ioid, ret); + + /* Run a simple darray test. */ + if ((ret = test_gdal(iosysid, ioid, num_flavors, flavor, my_rank, pio_type[t]))) + return ret; + + /* Free the PIO decomposition. 
*/ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + } + + return PIO_NOERR; +} + +/* Run tests for darray functions. */ +int main(int argc, char **argv) +{ +#define NUM_REARRANGERS_TO_TEST 2 + int rearranger[NUM_REARRANGERS_TO_TEST] = {PIO_REARR_SUBSET, PIO_REARR_BOX}; + int my_rank; + int ntasks; + int num_flavors; /* Number of PIO netCDF flavors in this build. */ + int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ + MPI_Comm test_comm; /* A communicator for this test. */ + int ret; /* Return code. */ + + OGRRegisterAll(); + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, + MIN_NTASKS, -1, &test_comm))) + ERR(ERR_INIT); + + PIOc_set_log_level(4); + + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Only do something on max_ntasks tasks. */ + if (my_rank < TARGET_NTASKS) + { + int iosysid; /* The ID for the parallel I/O system. */ + int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */ + int ioproc_start = 0; /* Zero based rank of first processor to be used for I/O. */ + int ret; /* Return code. */ + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_flavors, flavor))) + ERR(ret); + + for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++) + { + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(test_comm, NUM_IO_PROCS, ioproc_stride, + ioproc_start, rearranger[r], &iosysid))) + return ret; + +/* if ((ret = PIOc_set_rearr_opts(iosysid, PIO_REARR_COMM_P2P, PIO_REARR_COMM_FC_2D_DISABLE, false, false, PIO_REARR_COMM_UNLIMITED_PEND_REQ, false, false, PIO_REARR_COMM_UNLIMITED_PEND_REQ))) + return ret; +*/ + + /* Run tests. */ + if ((ret = test_all_gdal(iosysid, num_flavors, flavor, my_rank, test_comm))) + return ret; + + /* Finalize PIO system. */ + if ((ret = PIOc_free_iosystem(iosysid))) + return ret; + } /* next rearranger */ + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. */ + if ((ret = pio_test_finalize(&test_comm))) + return ret; + /* if ((ret = pio_test_finalize2(&test_comm, TEST_NAME))) */ + /* return ret; */ + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + return 0; +}
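
Note on the read path added in pio_gdal.c: the per-feature loops in GDALc_shp_get_double_field and GDALc_shp_get_float_field are truncated in the patch text as rendered above. Below is a minimal, hedged sketch of what such a loop looks like with the GDAL/OGR C API, assuming (as the patch comments state) a one-dimensional start/count over the features of layer 0. The helper name shp_read_double_field and its arguments are illustrative only; they are not part of the PIO or GDAL API, and PIO's actual implementation goes through file_desc_t and its error-handling macros.

    #include <stdio.h>
    #include <gdal.h>
    #include <ogr_api.h>

    /* Sketch: read `count` consecutive features of one real-valued attribute
     * field from layer 0 of an already-open OGR dataset into `out`.
     * Returns 0 on success, -1 on failure. */
    static int
    shp_read_double_field(GDALDatasetH hDS, int field_index,
                          size_t start, size_t count, double *out)
    {
        OGRLayerH hLayer = OGR_DS_GetLayer(hDS, 0);
        if (!hLayer)
            return -1;

        for (size_t i = 0; i < count; i++)
        {
            /* Shapefile FIDs are 0-based and contiguous, so feature i of the
             * requested range has FID start + i. */
            OGRFeatureH hFeat = OGR_L_GetFeature(hLayer, (GIntBig)(start + i));
            if (!hFeat)
                return -1;
            out[i] = OGR_F_GetFieldAsDouble(hFeat, field_index);
            OGR_F_Destroy(hFeat);
        }
        return 0;
    }

The float and integer variants would follow the same pattern, converting the value returned by OGR_F_GetFieldAsDouble or OGR_F_GetFieldAsInteger into the caller's buffer type. In the test above, this read is driven on the IO root task through GDALc_openfile on the shapefile, GDALc_inq_fieldid to map a field name to an index, and PIOc_read_darray with iotype PIO_IOTYPE_GDAL.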