import ucar.nc2.dt.grid.GridDataset; //导入依赖的package包/类
/**
 * Estimates the optimum {@link DataReadingStrategy} for reading from the
 * given {@link NetcdfDataset}.
 *
 * <p>If the file type reported by the dataset is local, uncompressed netCDF
 * or HDF4, this returns {@link DataReadingStrategy#SCANLINE}, which reduces
 * the amount of data read. For all other sources (e.g. remote OPeNDAP or
 * compressed data) it returns {@link DataReadingStrategy#BOUNDING_BOX},
 * which makes a single i/o call, minimising overhead — unless a grid in the
 * dataset is so large that a single bounding-box read could exhaust memory,
 * in which case SCANLINE is used instead.
 *
 * @param nc
 *            The NetcdfDataset from which data will be read.
 * @return an optimum DataReadingStrategy for reading from the dataset
 */
public static DataReadingStrategy getOptimumDataReadingStrategy(NetcdfDataset nc) {
    String fileType = nc.getFileTypeId();
    if ("netCDF".equalsIgnoreCase(fileType) || "HDF4".equalsIgnoreCase(fileType)) {
        /* Local, uncompressed format: reading scanlines minimises i/o. */
        return DataReadingStrategy.SCANLINE;
    }
    try {
        /*
         * If the size of the largest grid is greater than a fraction of
         * the maximum amount of memory, use a SCANLINE strategy.
         *
         * Although the multiplier is relatively small, objects are
         * actually (considerably?) bigger than the (dimension * data type
         * size) result. Additionally, we need to run everything else...
         *
         * If we get reports that this is still too large, it can be
         * lowered, or we can make it configurable.
         */
        final double multiplier = 0.5;
        /*
         * The threshold is loop-invariant, so compute it once rather than
         * re-evaluating multiplier * maxMemory() for every grid.
         */
        final double memoryThreshold = multiplier * Runtime.getRuntime().maxMemory();
        GridDataset gridDataset = getGridDataset(nc);
        for (GridDatatype grid : gridDataset.getGrids()) {
            HorizontalGrid hGrid = CdmUtils
                    .createHorizontalGrid(grid.getCoordinateSystem());
            DataType dt = grid.getDataType();
            /* Approximate in-memory footprint of one full horizontal slice. */
            long totalsize = hGrid.size() * dt.getSize();
            if (totalsize > memoryThreshold) {
                return DataReadingStrategy.SCANLINE;
            }
        }
    } catch (DataReadingException | IOException ignored) {
        /*
         * Deliberately ignored - it's either not a GridDataset or we can't
         * open it. If it's not a GridDataset, we won't be reading it with
         * a gridded strategy. If we can't open it, we're screwed either
         * way.
         */
    }
    return DataReadingStrategy.BOUNDING_BOX;
}