首页 > 其他分享 >探索Google Earth Engine:利用MODIS数据和R语言进行2000-2021年遥感生态指数(RSEI)的时空趋势分析

探索Google Earth Engine:利用MODIS数据和R语言进行2000-2021年遥感生态指数(RSEI)的时空趋势分析

时间:2024-11-14 22:16:50浏览次数:3  
标签:Engine MODIS Google 05 ee DateRange 30 01 var

前段时间,小编学习了在GEE上进行遥感生态指数(RSEI)的评估,非常头疼,但是实验了两周后,亲测有效,主要采用的是MODIS数据分析了2000-2021年中国内蒙古某地的RSEI时间序列分布状况,现在把学习的代码分享给大家。

1 GEE计算RSEI

1.1研究区域导入与初步定义

// Study area: an administrative region in Inner Mongolia (asset "hlbe").
var sa = ee.FeatureCollection("projects/rice-planting-area/assets/hlbe");
// MODIS Collection 6 inputs: 8-day surface reflectance (MOD09A1),
// 8-day land surface temperature (MOD11A2), 16-day vegetation indices (MOD13A1).
var MOD09A1 = ee.ImageCollection("MODIS/006/MOD09A1"),
    MOD11A2 = ee.ImageCollection("MODIS/006/MOD11A2"),
    MOD13A1 = ee.ImageCollection("MODIS/006/MOD13A1"),
    table = ee.FeatureCollection("users/yangyao19960805/new");
Map.centerObject(sa,6);
// Community palette package; a 6-class RdYlBu ramp is used for RSEI display.
var palettes = require('users/gena/packages:palettes');
var mypalette = palettes.colorbrewer.RdYlBu[6]

1.2 去云函数定义

/**
 * Masks cloud-affected pixels in a MOD09A1 image using its 'StateQA' band.
 * Bits 0-1 encode cloud state (0 = clear), bit 2 cloud shadow, bits 8-9 cirrus.
 * @param {ee.Image} image A MOD09A1 surface-reflectance image.
 * @return {ee.Image} The input image with cloud/shadow/cirrus pixels masked out.
 */
function cloudfree_mod09a1(image) {
  // Extracts the integer stored in bit positions [fromBit, toBit] of `value`.
  var bitwiseExtract = function (value, fromBit, toBit) {
    var last = (toBit === undefined) ? fromBit : toBit;
    var width = ee.Number(last).subtract(fromBit).add(1);
    var bitMask = ee.Number(1).leftShift(width).subtract(1);
    return value.rightShift(fromBit).bitwiseAnd(bitMask);
  };
  var stateQA = image.select('StateQA');
  var isClear = bitwiseExtract(stateQA, 0, 1).eq(0)   // clear sky
      .and(bitwiseExtract(stateQA, 2).eq(0))          // no cloud shadow
      .and(bitwiseExtract(stateQA, 8, 9).eq(0));      // no cirrus
  return image.updateMask(isClear);
}

1.3研究时间尺度定义

// Growing-season (May 1 - Sep 30) date ranges for each study year.
// Generated programmatically instead of 24 hand-written literals with
// inconsistent zero padding ('2000-9-30'); the original dr22/dr23 ranges
// were never used in the analysis list, so only 2000-2021 are produced.
var firstYear = 2000;
var lastYear = 2021;
var DateRG = ee.List.sequence(firstYear, lastYear).map(function (y) {
  y = ee.Number(y).int();
  return ee.DateRange(ee.Date.fromYMD(y, 5, 1), ee.Date.fromYMD(y, 9, 30));
});
print(DateRG,'DateRG')

1.4 四个指标计算

/** Computes MNDWI (normalized difference of green b04 and SWIR b06) as band 'mndwi'. */
function cal_mndwi(image) {
  var nd = image.normalizedDifference(['sur_refl_b04', 'sur_refl_b06']);
  return nd.rename('mndwi');
}
//var rencentIMG = MOD09A1.filterDate(DateRG.get(4)).filterBounds(sa).map(cloudfree_mod09a1).mean().clip(sa)
//var WaterMask = cal_mndwi(ee.Image(rencentIMG)).lt(0.2)
 
// Builds one 4-band indicator image (ndvi, lst, wet, ndbsi) for a date range.
// Each band is masked to non-water pixels (seasonal MNDWI < 0.2) and rescaled
// to [0, 1] with its regional min/max before being stacked.
function GetIMG(dr){
  var dateRange = ee.DateRange(dr)
  // Seasonal MNDWI composite used to mask out water (MNDWI >= 0.2).
  var mndwicol =  MOD09A1.filterDate(dateRange).filterBounds(sa).map(cloudfree_mod09a1).map(cal_mndwi)
  var WaterMask = mndwicol.mean().clip(sa).lt(0.2)
  // Placeholder image; its dummy first band is dropped by slice(1, 5) below.
  var rawIMG = ee.Image()
  // Regional min/max of an image. values() sorts keys alphabetically, so the
  // returned list is [<band>_max, <band>_min] — max first, min second.
  function sts_minmax (image){
    var minmax = image.reduceRegion({
    reducer: ee.Reducer.minMax(),
    geometry:sa,
    scale: 500,
    maxPixels: 1e9}).values();
    return minmax;}
  // LST (deg C) from MOD11A2 (scale 0.02, Kelvin -> Celsius),
  // water-masked, rescaled to [0, 1].
  function getLST(dr){
    var rawLST =  MOD11A2.filterDate(dr)
                    .filterBounds(sa).mosaic().clip(sa).select('LST_Day_1km')
                    .multiply(0.02).subtract(273.15).rename('lst').updateMask(WaterMask)//.reproject("EPSG:4326")
    var minMax = sts_minmax(rawLST)
    // get(1) is the minimum, get(0) the maximum (see sts_minmax ordering note).
    var LST = rawLST.unitScale(minMax.get(1),minMax.get(0))
    return LST}
  // NDVI from MOD13A1 (scale 0.0001), water-masked, rescaled to [0, 1].
  function getNDVI(dr){
    var rawNDVI = MOD13A1.filterDate(dr).filterBounds(sa)
                  .mosaic().clip(sa).select('NDVI')
                  .multiply(0.0001).rename('ndvi').updateMask(WaterMask)//.reproject("EPSG:4326")
    var minMax = sts_minmax(rawNDVI)
    var NDVI = rawNDVI.unitScale(minMax.get(1),minMax.get(0))
    return NDVI}
  // Tasseled-cap wetness as a weighted sum of the 7 MOD09A1 reflectance
  // bands (scale 0.0001), water-masked, rescaled to [0, 1].
  function getWET(dr){
    var srIMG = MOD09A1.filterDate(dr).filterBounds(sa).map(cloudfree_mod09a1)
                        .mosaic().clip(sa)//.reproject("EPSG:4326")
    var rawWET = srIMG.select(0).multiply(0.1147)
              .add(srIMG.select(1).multiply(0.2489))
              .add(srIMG.select(2).multiply(0.2408))
              .add(srIMG.select(3).multiply(0.3132))
              .add(srIMG.select(4).multiply(-0.3122))
              .add(srIMG.select(5).multiply(-0.6416))
              .add(srIMG.select(6).multiply(-0.5087))
              .multiply(0.0001).rename('wet').updateMask(WaterMask)
    var minMax = sts_minmax(rawWET)
    var WET = rawWET.unitScale(minMax.get(1),minMax.get(0))
    return WET}
  // NDBSI = mean of the bare-soil index (BI) and the index-based built-up
  // index (IBI), water-masked, rescaled to [0, 1].
  // NOTE(review): the variable names treat select(2) as blue and select(3)
  // as green — confirm against the MOD09A1 band order before relying on them.
  function getNDBSI(dr){
    var srIMG = MOD09A1.filterDate(dr).filterBounds(sa).map(cloudfree_mod09a1)
                        .mosaic().clip(sa)//.reproject("EPSG:4326")
    var swir1 = srIMG.select(5);
    var red = srIMG.select(0);
    var nir1 = srIMG.select(1);
    var blue = srIMG.select(2);
    var green = srIMG.select(3);
    var bi = swir1.add(red).subtract(nir1.add(blue)).divide(swir1.add(red).add(nir1.add(blue)));
    var ibi = swir1.multiply(2).divide(swir1.add(nir1)).subtract(nir1.divide(nir1.add(red)).add(green.divide(green.add(swir1))))
                  .divide(swir1.multiply(2).divide(swir1.add(nir1)).add(nir1.divide(nir1.add(red)).add(green.divide(green.add(swir1)))));
    var rawNDBSI = bi.add(ibi).divide(2).rename('ndbsi').updateMask(WaterMask)
    var minMax = sts_minmax(rawNDBSI)
    var NDBSI = rawNDBSI.unitScale(minMax.get(1),minMax.get(0))
    return NDBSI}
  // Stack the four indicators; slice(1, 5) drops rawIMG's dummy first band.
  return rawIMG.addBands(getNDVI(dr))
                .addBands(getLST(dr))
                .addBands(getWET(dr))
                .addBands(getNDBSI(dr)).slice(1,5)}
                
// One 4-band indicator image per year (list parallel to DateRG).
var IndexCol = DateRG.map(GetIMG)
print(IndexCol,'IndexCol')

1.5 主成分分析与导出相关结果

// Standardized principal components of a multi-band image over the study
// area (follows the GEE "Eigen analysis" PCA pattern): center each band,
// build the covariance matrix, eigen-decompose, project the pixels, and
// divide each component by its standard deviation.
function pca_model(image){
  var scale = 500;
  var bandNames = image.bandNames();
  var region = sa;
  // Per-band regional means used for centering.
  var meanDict = image.reduceRegion({
    reducer: ee.Reducer.mean(),
    geometry:region,
    scale: scale,
    maxPixels: 1e9});
  var means = ee.Image.constant(meanDict.values(bandNames));
  var centered = image.subtract(means);
  // Band-name generator: prefix + 1-based component number (pc1, pc2, ...).
  var getNewBandNames = function(prefix) {
  var seq = ee.List.sequence(1, bandNames.length());
  return seq.map(function(b) {
    return ee.String(prefix).cat(ee.Number(b).int());
  })};
  // Covariance matrix of the centered bands over the region.
  var arrays = centered.toArray();
  var covar = arrays.reduceRegion({
    reducer: ee.Reducer.centeredCovariance(),
    geometry: region,
    scale: scale,
    maxPixels: 1e9
  });
  var covarArray = ee.Array(covar.get('array'));
  // eigen() returns eigenvalues in column 0; the remaining columns hold
  // the eigenvectors (one per row).
  var eigens = covarArray.eigen();
  var eigenValues = eigens.slice(1, 0, 1);
  var eigenVectors = eigens.slice(1, 1);
  // Project each pixel vector onto the eigenvectors.
  var arrayImage = arrays.toArray(1);
  var principalComponents = ee.Image(eigenVectors).matrixMultiply(arrayImage);
  // Standard deviation of each component (sqrt of its eigenvalue).
  var sdImage = ee.Image(eigenValues.sqrt())
    .arrayProject([0]).arrayFlatten([getNewBandNames('sd')]);
  // Flatten to bands pc1..pcN, standardized to unit variance.
  return principalComponents
    .arrayProject([0])
    .arrayFlatten([getNewBandNames('pc')])
    .divide(sdImage);
}
// First principal component (PC1) of each yearly 4-band indicator image.
var PCA1_result = ee.ImageCollection(IndexCol).map(pca_model).select(0)
//print(PCA1_result)
// Eigenvalues of the covariance matrix of one yearly indicator image
// (same centering/covariance steps as pca_model, returning only the values).
function pca_modeleigenValues(k){
  var image =  ee.Image(k)
  var scale = 500;
  var bandNames = image.bandNames();
  var region = sa;
  // Per-band regional means used for centering.
  var meanDict = image.reduceRegion({
    reducer: ee.Reducer.mean(),
    geometry:region,
    scale: scale,
    maxPixels: 1e9
  });
  var means = ee.Image.constant(meanDict.values(bandNames));
  var centered = image.subtract(means);
  // Defined for symmetry with pca_model; not used in this function.
  var getNewBandNames = function(prefix) {
  var seq = ee.List.sequence(1, bandNames.length());
  return seq.map(function(b) {
    return ee.String(prefix).cat(ee.Number(b).int());
  })};
  // Covariance matrix of the centered bands over the region.
  var arrays = centered.toArray();
  var covar = arrays.reduceRegion({
    reducer: ee.Reducer.centeredCovariance(),
    geometry: region,
    scale: scale,
    maxPixels: 1e9
  });
  var covarArray = ee.Array(covar.get('array'));
  var eigens = covarArray.eigen();
  // Column 0 of the eigen decomposition holds the eigenvalues.
  var eigenValues = eigens.slice(1, 0, 1);
  return eigenValues}
// Eigenvalues of the indicator covariance matrix for every year.
var s_eigenValues = IndexCol.map(pca_modeleigenValues)
print(s_eigenValues,'eigenValues')
// 计算 PCA 特征向量的函数
// Computes PCA eigenvectors for one yearly indicator image and returns them
// as a table row (ee.Feature) keyed by the image id.
function pca_modeleigenVectors(k) {
  var image = ee.Image(k);
  var scale = 500;
  var bandNames = image.bandNames();
  var region = sa; // region of interest

  // Compute each band's regional mean and center the image.
  var meanDict = image.reduceRegion({
    reducer: ee.Reducer.mean(),
    geometry: region,
    scale: scale,
    maxPixels: 1e9
  });
  var means = ee.Image.constant(meanDict.values(bandNames));
  var centered = image.subtract(means);
  
  // Convert the image to array format.
  var arrays = centered.toArray();
  
  // Covariance matrix of the centered bands.
  var covar = arrays.reduceRegion({
    reducer: ee.Reducer.centeredCovariance(),
    geometry: region,
    scale: scale,
    maxPixels: 1e9
  });
  
  // Extract the covariance matrix and eigen-decompose it.
  var covarArray = ee.Array(covar.get('array'));
  var eigens = covarArray.eigen();
  var eigenVectors = eigens.slice(1, 1);  // eigenvectors (one row per component)
  
  // Convert the eigenvectors to a list.
  var eigenVectorsList = eigenVectors.toList();
  
  // Image id used to key the output row.
  var imageId = image.get('system:index');
  
  // Dictionary with the image id and the four component eigenvectors.
  var resultDict = ee.Dictionary({
    'ImageId': imageId,
    'EigenVector_1': eigenVectorsList.get(0),
    'EigenVector_2': eigenVectorsList.get(1),
    'EigenVector_3': eigenVectorsList.get(2),
    'EigenVector_4': eigenVectorsList.get(3)
  });
  
  // Geometry-less feature so the table exports cleanly as CSV.
  return ee.Feature(null, resultDict);
}

// 对影像集合应用 PCA 特征向量计算
// Apply the eigenvector computation to every yearly indicator image.
var s_eigenVectors = IndexCol.map(pca_modeleigenVectors);

// Wrap the per-year features into a FeatureCollection.
var resultCollection = ee.FeatureCollection(s_eigenVectors);

// Export the eigenvector table to Google Drive as CSV.
Export.table.toDrive({
  collection: resultCollection,
  description: 'PCA_EigenVectors', // export task description
  fileFormat: 'CSV', // file format
  folder: 'GEE_Export', // Drive folder (optional)
  fileNamePrefix: 'pca_eigenvectors' // file name prefix (optional)
});


// 计算 PCA 方差百分比的函数
// Computes, for one yearly indicator image, the percentage of total variance
// explained by each principal component, returned as a table row (ee.Feature)
// keyed by the image id.
function pca_modeleigenpercentage(k) {
  var img = ee.Image(k);
  var region = sa; // region of interest
  var scale = 500;
  var bands = img.bandNames();

  // Center each band on its regional mean.
  var bandMeans = img.reduceRegion({
    reducer: ee.Reducer.mean(),
    geometry: region,
    scale: scale,
    maxPixels: 1e9
  });
  var centeredImg = img.subtract(ee.Image.constant(bandMeans.values(bands)));

  // Covariance matrix of the centered bands over the region.
  var covMatrix = ee.Array(
    centeredImg.toArray().reduceRegion({
      reducer: ee.Reducer.centeredCovariance(),
      geometry: region,
      scale: scale,
      maxPixels: 1e9
    }).get('array'));

  // Eigenvalues (column 0 of the eigen decomposition), flattened to a list.
  var valuesList = covMatrix.eigen().slice(1, 0, 1).toList().flatten();
  var totalVariance = valuesList.reduce(ee.Reducer.sum());

  // Percentage of variance per component, formatted to two decimals.
  var percentages = valuesList.map(function (v) {
    return ee.Number(v).divide(totalVariance).multiply(100).format('%.2f');
  });

  // Geometry-less feature: one row of the variance-percentage table.
  return ee.Feature(null, ee.Dictionary({
    'ImageId': img.get('system:index'),
    'PC1_Variance': percentages.get(0),
    'PC2_Variance': percentages.get(1),
    'PC3_Variance': percentages.get(2),
    'PC4_Variance': percentages.get(3)
  }));
}

// 对影像集合应用 PCA 方差百分比计算
// Apply the variance-percentage computation to every yearly image.
var s_percentageVariance = IndexCol.map(pca_modeleigenpercentage);

// Wrap the per-year features into a FeatureCollection.
var resultCollection = ee.FeatureCollection(s_percentageVariance);

// Export the variance-percentage table to Google Drive as CSV.
Export.table.toDrive({
  collection: resultCollection,
  description: 'PCA_Variance_Percentage', // export task description
  fileFormat: 'CSV', // file format
  folder: 'GEE_Export', // Drive folder (optional)
  fileNamePrefix: 'pca_variance_percentage' // file name prefix (optional)
});

1.6 RSEI结果与导出

/**
 * Rescales a PC1 image to [0, 1] over the study area and renames it 'rsei'.
 * The min/max dictionary's values() are alphabetically keyed ("..._max"
 * before "..._min"), so index 0 is the maximum and index 1 the minimum.
 */
function normlizedPCA1(image){
  var stats = image.reduceRegion({
    reducer: ee.Reducer.minMax(),
    geometry: sa,
    scale: 500,
    maxPixels: 1e9
  }).values();
  // unitScale(low, high): low = stats[1] (min), high = stats[0] (max).
  return image.unitScale(stats.get(1), stats.get(0)).rename('rsei');
}
// Per-year RSEI collection (normalized PC1).
var RSEI = PCA1_result.map(normlizedPCA1)
 
// Start date of each range, used to timestamp the RSEI images.
var DateRG_start = DateRG.map(function (dr){return ee.DateRange(dr).start()})
// Pair each yearly RSEI image with its start date: [[image, date], ...].
var zipRSEI = RSEI.toList(DateRG.length()).zip(DateRG_start)
// Stamp each image with 'system:time_start' = start date + 2 months (mid
// growing season). NOTE(review): the value set is an ee.Date rather than
// epoch milliseconds — confirm downstream consumers accept this.
function setSystime(image){
  var systime = ee.Date(ee.List(image).get(1)).advance(2,'month')
  return ee.Image(ee.List(image).get(0)).set('system:time_start',systime)}
 
var stimeRSEI = zipRSEI.map(setSystime)
// Display one year's RSEI (index 20 = year 2020 in the 0-21 list).
Map.addLayer(ee.Image(stimeRSEI.get(20)),{min:0,max:1,palette:mypalette},'rsei')
 
print(stimeRSEI,'stimeRSEI')
// Chart options for the RSEI time-series plot.
// FIX: vAxis.title must be a string — the original wrote `title: RSEI`,
// which referenced the RSEI ImageCollection variable instead of the label.
var options = {
  width: 400,
  height: 240,
  legend: {position: 'top', textStyle: {color: 'blue', fontSize: 16}},
  lineWidth: 1,
  pointSize: 5,
  vAxis: {
    title: 'RSEI'  // was: title: RSEI (a variable, not a string)
  },
  trendlines: {
    0: {
      type: 'linear',
      color: 'red',
      lineWidth: 1,
      opacity: 0.8,
      showR2: true,
      visibleInLegend: true
    }},
  title: 'Timeseries RSEI',  // fixed typo "Timeserise"
};

// Region-mean RSEI time series over the study area. stimeRSEI is an ee.List,
// so cast it to an ImageCollection explicitly for the chart.
var RSEImean = ui.Chart.image.seriesByRegion({
  imageCollection: ee.ImageCollection(stimeRSEI),
  regions: sa,
  reducer: ee.Reducer.mean(),
  scale: 500,
  seriesProperty: "RSEI"
}).setOptions(options)
print(RSEImean,'RSEImean')
 
// Stack the yearly RSEI images into one multi-band image (bands 0_rsei..21_rsei).
var stimeRSEIbands = ee.ImageCollection(stimeRSEI).toBands()
print(stimeRSEIbands,'stimeRSEIbands')
// Export one GeoTIFF per year.
// FIX: DateRG holds 22 ranges (2000-2021), so the stacked image only has
// bands 0_rsei ... 21_rsei; the original loop ran to j < 24 and would fail
// on the nonexistent bands 22_rsei and 23_rsei.
var nYears = 22;
for (var j = 0; j < nYears; j++) {
  Export.image.toDrive({
    image: stimeRSEIbands.select(j + '_rsei'),
    description: 'rsei' + j,
    scale: 250,
    region: sa,
    fileFormat: 'GeoTIFF',
    folder: 'RSEI',
    maxPixels: 1e13,
    crs: "EPSG:4326",
  });
}

通过上述代码,在GEE平台上我们看看结果,从时间序列的结果来看,效果还是不错的:

呦,还不错,根据此运算结果,我们能够得到一些重要的表格,看看NDVI、WET、LST、NDBSI四个指标的特征向量,两正两负,主成分1的贡献率超过50%,也很不错。

此时看到控制栏的task选项,我们看到了,点击run之后,我们可以下载到从2000年-2021年所有的RSEI结果。

当然,如果你改改前面的一段代码,在IndexCol这个集合出现之后,我们来改一下代码,计算一下每个指标的均值。

// One 4-band indicator image per date range (list parallel to DateRG).
var IndexCol = DateRG.map(GetIMG);
// print(IndexCol,'IndexCol');
// var Image0 = ee.Image(IndexCol.get(0));
// print(Image0.select("ndvi"));
// Inputs re-declared so this snippet can run standalone in the Code Editor.
var sa = ee.FeatureCollection("projects/rice-planting-area/assets/hlbe");
var MOD09A1 = ee.ImageCollection("MODIS/006/MOD09A1"),
    MOD11A2 = ee.ImageCollection("MODIS/006/MOD11A2"),
    MOD13A1 = ee.ImageCollection("MODIS/006/MOD13A1");

// Date range list (example for 2000-2021 May to September periods)
// Growing-season (May 1 - Sep 30) date ranges for 2000-2021, generated
// programmatically instead of 22 hand-written literals with inconsistent
// zero padding. Extend the year bounds as needed.
var DateRG = ee.List.sequence(2000, 2021).map(function (y) {
  y = ee.Number(y).int();
  return ee.DateRange(ee.Date.fromYMD(y, 5, 1), ee.Date.fromYMD(y, 9, 30));
});

// Function to calculate mean values for all bands for a specific date range
// Computes the regional mean of all four indicator bands for one date range.
// IMPROVED: a single reduceRegion over the four bands replaces the original
// four separate reduceRegion calls (same result, one reduction instead of four).
function calculateAnnualMeans(dr) {
  var startDate = ee.Date(ee.DateRange(dr).start());
  var year = startDate.get('year');

  // Image for this date range (IndexCol is parallel to DateRG).
  var image = ee.Image(IndexCol.get(DateRG.indexOf(dr)));

  // One reduction over all four bands; result keys are the band names.
  var means = image.select(['ndvi', 'lst', 'wet', 'ndbsi']).reduceRegion({
    reducer: ee.Reducer.mean(),
    geometry: sa,
    scale: 250,
    maxPixels: 1e9
  });

  // Table row with the year and the four band means.
  return ee.Feature(null, {
    'year': year,
    'mean_NDVI': means.get('ndvi'),
    'mean_LST': means.get('lst'),
    'mean_WET': means.get('wet'),
    'mean_NDBSI': means.get('ndbsi')
  });
}

// Map over each date range to calculate annual means and create a FeatureCollection
// Map over each date range to calculate annual means and create a FeatureCollection
var annualMeans = ee.FeatureCollection(DateRG.map(calculateAnnualMeans));

// Export the per-year indicator means as a single CSV table to Google Drive.
Export.table.toDrive({
  collection: annualMeans,
  description: 'Annual_Mean_Values_All_Bands',
  folder: 'GEE_Exports',
  fileFormat: 'CSV'
});

 上述代码运行之后,你可以得到四个指标的均值显示。

关于代码分享的链接:

https://code.earthengine.google.com/54bd2ca9ab3e0f8c074c4ba9bf074d24?noload=1

2 R语言的一些建议

22张数据下载之后,对栅格数据进行一些处理就是必须要考虑的,此处我们选择R语言来进一步分析RSEI的情况。

2.1 均值计算与导出

library(terra)

# Input folder containing the yearly RSEI GeoTIFFs.
input_folder <- "F:/result1/RSEI"

# Collect every .tif file in the folder.
tiff_files <- list.files(path = input_folder, pattern = "\\.tif$", full.names = TRUE)

# Use the first raster as the alignment reference.
reference_raster <- rast(tiff_files[1])

# Print the reference grid so misalignment is easy to spot.
cat("Reference raster extent:", as.character(ext(reference_raster)), "\n")
cat("Reference raster resolution:", res(reference_raster), "\n")
cat("Reference raster projection:", crs(reference_raster), "\n")

# Accumulators: per-pixel sum of values and count of valid (non-NA) years.
# FIX: the original summed with na.rm = TRUE and divided by the file count,
# which silently treated NA cells as zeros and biased the mean low wherever
# any year was masked. Dividing by the per-pixel valid count is correct.
# (NA cells in the reference raster itself stay NA, as in the original.)
sum_raster <- reference_raster * 0
count_raster <- reference_raster * 0

# Process each raster file, accumulating sums and valid-observation counts.
for (tiff_file in tiff_files) {
  raster_data <- rast(tiff_file)

  # Print basic info for each raster.
  cat("Processing raster:", tiff_file, "\n")
  cat("Extent:", as.character(ext(raster_data)), "\n")
  cat("Resolution:", res(raster_data), "\n")
  cat("Projection:", crs(raster_data), "\n")

  # Skip rasters that contain no valid values at all.
  values_check <- values(raster_data)
  if (all(is.na(values_check))) {
    cat("Warning: The raster", tiff_file, "has no valid values.\n")
    next
  } else {
    cat("The raster", tiff_file, "contains valid values.\n")
  }

  # Resample/reproject onto the reference grid.
  aligned_raster <- project(raster_data, reference_raster)

  # Add valid values to the sum and bump the per-pixel observation count.
  valid <- !is.na(aligned_raster)
  sum_raster <- sum_raster + ifel(valid, aligned_raster, 0)
  count_raster <- count_raster + ifel(valid, 1, 0)
}

# Per-pixel mean over valid observations only (0/0 yields NaN where a pixel
# was NA in every year).
mean_raster <- sum_raster / count_raster

# Output file path.
output_path <- "F:/result1/mean_output.tif"

# Write the mean raster.
writeRaster(mean_raster, output_path, overwrite = TRUE)

# Display the mean raster.
plot(mean_raster, main = "Mean Raster", col = terrain.colors(20))

cat("Mean raster saved to:", output_path, "\n")

这里我们用来计算多年均值栅格,即将22张合成为一张均值栅格。

2.2 RSEI批量分级

RSEI根据不同的阈值范围,划分成不同的生态环境质量标准,在ArcGIS里面一张一张分,太繁琐了,我们来批量分吧。

library(terra)

# Folder with the RSEI GeoTIFFs to reclassify.
folder_path <- "F:/RSEIRECLASS"  # replace with your folder path
tif_files <- list.files(folder_path, pattern = "\\.tif$", full.names = TRUE)

# RSEI class boundaries and labels (1 = worst ... 5 = best).
breaks <- c(0, 0.20, 0.40, 0.60, 0.80, 1.00)
labels <- c(1, 2, 3, 4, 5)

# Build the from/to/becomes reclassification matrix once from breaks/labels.
# FIX: the original declared `breaks`/`labels` but never used them, duplicating
# the thresholds in a hand-written rbind(). The outer bounds are opened to
# +-Inf (as in the original matrix) so values slightly outside [0, 1] still
# fall into the first/last class.
inner <- breaks[2:(length(breaks) - 1)]
rcl <- cbind(c(-Inf, inner), c(inner, Inf), labels)

# Reclassify every file in the folder.
for (file in tif_files) {
  rsei_raster <- rast(file)

  # Reclassify into the five RSEI quality levels.
  rsei_classified <- classify(rsei_raster, rcl)

  # Write "<name>_classified.tif" next to the input file.
  output_file <- paste0(sub(".tif$", "", basename(file)), "_classified.tif")
  output_path <- file.path(folder_path, output_file)
  # To save to a different folder, use instead:
  # output_path <- file.path("path_to_output_folder", output_file)

  # Save the classified raster.
  writeRaster(rsei_classified, output_path, overwrite = TRUE)

  # Report and visualize the result.
  cat("Processed and saved:", output_path, "\n")
  plot(rsei_classified, main = paste("Processed: ", basename(file)), col = c("red", "orange", "yellow", "lightgreen", "green"))
}

2.3 统计不同类别像元占比

对RSEI进行质量分级后,我们需要知道不同级别的像元能够占有多少百分比,并且进行柱形图的绘制,应该怎么做。

library(terra)
library(ggplot2)
library(dplyr)

# Trend-class rasters for the two periods (output of the Sen+MK reclassification).
raster_early <- rast("F:/result1/senmk/senmkreclass.tif")
raster_late <- rast("F:/result1/senmk/senmk2reclass.tif")

# Trend classes produced by the Sen+MK reclassification step.
class_levels <- c(-2, -1, 0, 1, 2)

# Helper: percentage of pixels falling in each trend class for one raster.
percent_by_class <- function(r) {
  counts <- table(factor(values(r), levels = class_levels))
  counts / sum(counts) * 100
}

pct_early <- percent_by_class(raster_early)
pct_late <- percent_by_class(raster_late)

# Long-format table for ggplot: class label, percentage, and time period.
level_data <- data.frame(
  Level = rep(c("显著下降", "下降", "稳定", "提升", "显著提升"), 2),
  Percentage = c(pct_early, pct_late),
  TimePeriod = rep(c("2000-2010", "2011-2021"), each = 5)
)

# Grouped bar chart of class shares per period.
ggplot(level_data, aes(x = TimePeriod, y = Percentage, fill = Level)) +
  geom_bar(stat = "identity", position = "dodge") +
  labs(title = "Pixel Proportion by RSEI Trend Level for Two Time Periods",
       x = "时间段", y = "像元占比 (%)") +
  scale_fill_manual(values = c("显著下降" = "#FF0000",  # red
                               "下降" = "#FF7F00",    # orange
                               "稳定" = "#FFFF00",    # yellow
                               "提升" = "#00FF00",    # green
                               "显著提升" = "#0000FF")) +  # blue
  theme_minimal() +
  theme(legend.position = "top", axis.text.x = element_text(angle = 45, hjust = 1))

看看R语言简洁的界面:

2.4 Sen+MK趋势分析

如果需要对RSEI的时间序列栅格进行Sen+MK检验,该如何操作呢?

library(trend)
# Reads a folder of single-band yearly TIFFs (here: the 2011-2021 RSEI layers).
# NOTE(review): rast()/app() require the terra package to be loaded already.
flnames <- list.files(path = 'F:/result1/2011-2021/', pattern = '.tif$')
fl <- paste0("F:/result1/2011-2021/", flnames)
firs <- rast(fl)

# Per-pixel Sen's slope + Mann-Kendall test.
# FIX: the original only skipped pixels with *no* data and then built
# ts(na.omit(x), start = 2011, end = 2021). When any year was NA, na.omit()
# shortened the series but the fixed start/end made ts() recycle values,
# silently producing a wrong trend. Pixels with any gap are now skipped,
# matching the intent of the original "remove pixels with NA" comment.
fun_sen <- function(x){
  if (any(is.na(x))) return(c(NA, NA, NA))   # skip pixels with an incomplete series
  MK_estimate <- trend::sens.slope(ts(x, start = 2011, frequency = 1), conf.level = 0.95) # Sen slope estimate
  slope <- MK_estimate$estimates
  MK_test <- MK_estimate$p.value
  Zs <- MK_estimate$statistic
  return(c(Zs, slope, MK_test))
}

firs_sen = app(firs, fun_sen, cores=4 )
names(firs_sen) = c("Z", "slope", "p-value")

# Plot and save the three result layers (Z statistic, slope, p-value).
plot(firs_sen)
writeRaster(firs_sen, filename = "F:/result1/senmk/senmk2.tif")



library(terra)

# Sen+MK result stack written by the previous step.
firs_sen <- rast("F:/result1/senmk/senmk2.tif")
# Pull out the "Z" (MK statistic) and "slope" (Sen's slope) layers.
z_layer <- firs_sen[["Z"]]       # "Z" layer
slope_layer <- firs_sen[["slope"]]  # "slope" layer

# Reclassify "Z": |Z| > 1.96 (significant at the 0.05 level) -> 2, else -> 1.
z_classes <- c(-Inf, -1.96, 1.96, Inf)  # class boundaries
z_values <- c(2, 1, 2)  # class values
reclass_z <- classify(z_layer, cbind(z_classes[-length(z_classes)], z_classes[-1], z_values))  # reclassify

# Reclassify "slope": decreasing -> -1, near-zero (stable) -> 0, increasing -> 1.
slope_classes <- c(-Inf, -0.0005, 0.0005, Inf)  # class boundaries
slope_values <- c(-1, 0, 1)  # class values
reclass_s <- classify(slope_layer, cbind(slope_classes[-length(slope_classes)], slope_classes[-1], slope_values))  # reclassify

# Product encodes the combined trend class: -2 significant decrease,
# -1 decrease, 0 stable, 1 increase, 2 significant increase.
result <- reclass_z * reclass_s

# Save the combined trend classes as a new GeoTIFF.
output_path <- "F:/result1/senmk/senmk2reclass.tif"
plot(result)
writeRaster(result, filename = output_path, overwrite = TRUE)

# Completion message.
print("重分类和保存已完成!")

上述代码,一键帮你重分类啊,不需要在ArcGIS里面再对影像进行分类了。

2.5.分析变异系数(CV)

变异系数反映了时间序列栅格的稳定程度,我们来看看R语言里面如何实现。

# Load the terra package
library(terra)

# Input/output folders for the RSEI time-series rasters.
input_directory <- "F:/shiyan1/shiyan2"
output_directory <- "F:/shiyan1/shiyan2"
file_paths <- list.files(path = input_directory, pattern = "\\.tif$", full.names = TRUE)

# Abort early if no matching raster files were found.
if (length(file_paths) == 0) {
  stop("未找到符合条件的文件,请检查路径和文件名模式")
}

# Stack all yearly rasters into one multi-layer SpatRaster.
raster_stack <- rast(file_paths)

# Per-pixel mean and standard deviation across the years.
mean_raster <- app(raster_stack, mean, na.rm = TRUE)
sd_raster <- app(raster_stack, sd, na.rm = TRUE)

# Coefficient of variation = standard deviation / mean.
cv_raster <- sd_raster / mean_raster

# Class boundaries and labels (counts must match: 4 intervals, 4 labels).
breaks <- c(0, 0.1, 0.3, 0.5, Inf)  # class thresholds
labels <- c(1, 2, 3, 4)  # class labels, low to high variability

# from/to/becomes matrix for classify().
class_intervals <- cbind(breaks[-length(breaks)], breaks[-1], labels)

# Apply the classification.
classified_cv <- classify(cv_raster, rcl = class_intervals)

# Create the output folder if it does not exist.
if (!dir.exists(output_directory)) {
  dir.create(output_directory)
}

# Output file path.
output_file <- file.path(output_directory, "CV_output1.tif")

# Save the classified CV raster.
writeRaster(classified_cv, filename = output_file, overwrite = TRUE)

# Visualize the classification.
plot(classified_cv, main = "Classified CV Stability of Raster Images (2000-2021)", col = c("blue", "green", "yellow", "red"))

如果有的宝宝喜欢matlab,也可以用它来试试!

% Read one raster to obtain the grid size and georeferencing info.
[a, R] = readgeoraster('G:/Maping/Making money/rsei_1400/RESI/RESI/rsei2000.tif');
info = geotiffinfo('G:/Maping/Making money/rsei_1400/RESI/RESI/rsei2000.tif');
[m, n] = size(a);

% One column per year (2000-2021), one row per pixel.
nYears = 22;
datasum = zeros(m * n, nYears) + NaN;

for year = 2000:2021
    filename = ['G:/Maping/Making money/rsei_1400/RESI/RESI/rsei', int2str(year), '.tif'];
    data = importdata(filename);
    data = reshape(data, m * n, 1);
    datasum(:, year - 1999) = data;
end

% Per-pixel coefficient of variation (sample std / mean).
% FIX: the original looped over length(datasum) — max(size(datasum)) — which
% only equals the pixel count because m*n > 22; size(datasum, 1) is robust.
% It also reused the variable `a` (clobbering the raster) and hand-rolled the
% standard deviation; std() with its default N-1 normalization matches the
% original sqrt(sum((x - mean)^2) / 21) for 22 samples.
CV = zeros(1, m * n) + nan;
for i = 1:size(datasum, 1)
    series = datasum(i, :);
    if min(series) >= 0
        CV(i) = std(series) / mean(series);
    end
end

% Reshape back to the raster grid and write the georeferenced result.
CV = reshape(CV, m, n);
geotiffwrite('G:/Maping/Making money/rsei_1400/RESI/cv变异系数.tif', CV, R, 'GeoKeyDirectoryTag', info.GeoTIFFTags.GeoKeyDirectoryTag);
disp('OK!');

2.6 Hurst指数分析

最后,小编借助GPT,想试试能不能算出时间序列栅格的Hurst指数,实验代码提供如下:

# Load the required library
library(terra)

# Input folder with the yearly rasters; results are written to output_folder.
input_folder <- "F:/shiyan1/shiyan2"  # folder containing the input rasters
output_folder <- "F:/shiyan1/shiyan2"  # folder for the classified results

# All raster files (assumed to be named 2000.tif, 2001.tif, ..., 2021.tif)
raster_files <- list.files(path = input_folder, pattern = "*.tif", full.names = TRUE)

# Stack the yearly rasters into one multi-layer SpatRaster.
rasters <- rast(raster_files)

# Rescaled-range (R/S) style Hurst exponent for one pixel's time series.
# ROBUSTNESS FIX: returns NA for series that are too short after dropping NA
# years, or that are constant — the original produced NaN/-Inf in those cases
# because sd(x) was 0 or NA.
hurst_index <- function(x) {
  x <- x[!is.na(x)]                  # drop masked years
  if (length(x) < 2) return(NA)      # too short to estimate
  n <- length(x)
  x_cumsum <- cumsum(x - mean(x))    # cumulative deviations from the mean
  range_x <- max(x_cumsum) - min(x_cumsum)  # rescaled range R
  std_x <- sd(x)                     # scale S
  if (std_x == 0 || range_x == 0) return(NA)  # constant series: H undefined
  log(range_x / std_x) / log(n)      # H = log(R/S) / log(n)
}

# Per-pixel Hurst index across the stack.
hurst_map <- app(rasters, fun = hurst_index)

# Quick look at the raw Hurst map.
plot(hurst_map, main = "Hurst Index Map")

# Year labels from the file names.
# FIX: sub() only replaces the first alternation match (the directory),
# leaving ".tif" attached to every name; gsub() removes both.
years <- gsub("(.*/|.tif$)", "", raster_files)

# Five-class breakdown of the Hurst index (adjust breaks to your needs).
class_breaks <- c(-Inf, 0.2, 0.4, 0.6, 0.8, Inf)
class_labels <- c("Very Low", "Low", "Medium", "High", "Very High")

# classify() needs a from/to/becomes matrix.
# FIX: the original passed cbind(class_breaks, 1:5) — six rows with the label
# column recycled to (1,2,3,4,5,1) — which misclassified the data.
rcl <- cbind(class_breaks[-length(class_breaks)], class_breaks[-1], 1:5)
hurst_classified <- classify(hurst_map, rcl = rcl)

# Classified map.
plot(hurst_classified, main = "Classified Hurst Index Map")

# Export each layer of the classified result.
# NOTE(review): app() collapses the time series to a single Hurst layer, so
# this loop normally writes exactly one file; the per-year naming via
# years[i] is kept from the original — confirm the intended output naming.
for (i in 1:nlyr(hurst_classified)) {
  hurst_layer <- hurst_classified[[i]]

  # Output file path for this layer.
  output_file <- file.path(output_folder, paste0(years[i], "_hurst_index_classified.tif"))

  # Write the layer and report progress.
  writeRaster(hurst_layer, filename = output_file, overwrite = TRUE)
  cat("导出:", output_file, "\n")
}

不知道结果对不对,大家可以用其他的方式试试看,对比一下结果哈。

好啦,以上就是今天的全部分享,希望能够帮到大家噢!

标签:Engine,MODIS,Google,05,ee,DateRange,30,01,var
From: https://blog.csdn.net/Promising_GEO/article/details/143781464

相关文章

  • Google Play上架被拒的原因以及解决方法合集
     GooglePlay商店是全球Android开发者发布应用的首选平台,但在这个平台上发布或更新应用时,开发者必须遵守严格的规定和政策。如果违反这些规定,应用可能会被拒绝上架或更新,甚至可能导致开发者账号被封禁。本文将总结GooglePlay上架或更新被拒的常见原因,并提供相应的解决方案。......
  • Google Ads账号被封原因与申诉方法
      GoogleAds是全球最大的在线广告平台之一,为广告主提供了广泛的广告服务。然而,即使在遵守规则的情况下,有时账户也可能因为各种原因遭遇封禁。了解这些原因和申诉方法对于维护广告账户的健康至关重要。一、GoogleAds封户原因:1、违反平台政策:1)规避系统:常见原因包括在账......
  • GEE训练教程—— Google Earth Engine 中分析马耳他的植被指数(NDVI 和 EVI)
    目录简介MODIS/006/MOD13Q1数据函数visualize(bands, gain, bias, min, max, gamma, opacity, palette, forceRgbOutput)Arguments:Returns: Image代码结果简介在GoogleEarthEngine中分析马耳他的植被指数(NDVI和EVI)###代码说明1.**边界数据集**......
  • 计算引擎engine2x的枚举获取api
    直接代码:API=>:POST'http://ip:port/api/portal/operat'Content-Type:application/x-www-form-urlencodedac=get_enum_items_by_idid=d6bf6e1bf7f34a59b9a8bca61a7ef9f9//ajaxvarsettings={"url":"http://ip:port/api/portal/ope......
  • 2025年工程管理与安全工程国际学术会议 (EMSE 2025) 2025 International Conference
    重要信息官网:https://ais.cn/u/vEbMBz......
  • 解决google reCaptcha 认证问题
    参考https://xiaoxinblog.xyz/posts/65d46cc7.htmlF12openconsole键盘输入允许粘贴!(function(){"usestrict";document.querySelectorAll("script").forEach(function(e){if(e.src.indexOf("googleapis.com")>=0||......
  • 如何在60分钟内进行ASO竞争对手分析(App Store 和 Google Play Store)
       如今,移动应用程序的数量已达数以百万计,无论您是否是市场新手,都不能否认您迟早会面临激烈的竞争。因此,ASO竞争对手分析是您取得成功的重要方面。如果您不知道自己的竞争对手是谁,您就不可能达到顶峰或保持领先地位。在这篇文章中,让我们介绍一下在不到60分钟的时间内对竞......
  • 字节豆包发布新模型,AI 一句话 P 图;Google 正式推出 Vids,简单提示即可生成视频演示丨 R
       开发者朋友们大家好: 这里是「RTE开发者日报」,每天和大家一起看新闻、聊八卦。我们的社区编辑团队会整理分享RTE(Real-TimeEngagement)领域内「有话题的新闻」、「有态度的观点」、「有意思的数据」、「有思考的文章」、「有看点的会议」,但内容仅代表编辑......
  • ENVI55扩展工具: MODIS Gap-Filled 数据读取工具
    1工具介绍工具支持ENVI5.5及以上版本。大部分MODIS产品数据均可使用MCTK工具打开和处理。但是最近在使用MODISGap-Filled数据时,发现MCTK工具并不支持,会弹出如下提示。 MODISGap-Filled数据通常为年合成产品,例如MOD17A3HGF为年合成植被净初级生产力和总初级......
  • Google账号和Play开发者账号的各种骚操作你都知道吗?
    让我们带着问题粗发!Google账号能不能更换登录Gmail邮箱?注销Google账号后,Gmail邮箱是否还可以使用?Google账号如何注销?注销Google账号后,GooglePlay开发者账号是否会一并注销?GooglePlay开发者账号能不能注销?如果能,如何注销?是否能退回25美元注册费用吗?开发者名......