1 [root@soar ~]# curl http://localhost:31600/_cluster/allocation/explain?pretty 2 { 3 "index" : "event20220429_v4", 4 "shard" : 3, 5 "primary" : false, 6 "current_state" : "unassigned", 7 "unassigned_info" : { 8 "reason" : "ALLOCATION_FAILED", 9 "at" : "2022-10-13T10:04:15.030Z", 10 "failed_allocation_attempts" : 5, 11 "details" : "failed shard on node [iE97es_jRNm1I66PFE4ixQ]: failed recovery, failure RecoveryFailedException[[event20220429_v4][3]: Recovery failed from {Cc1v1pt}{Cc1v1ptYRn-Bagl6cPrxzA}{pnp55WokSdCNKTGTm4dSOA}{127.0.0.1}{127.0.0.1:31701}{xpack.installed=true} into {iE97es_}{iE97es_jRNm1I66PFE4ixQ}{c74MnsrwQL6av57bCvQBPg}{127.0.0.1}{127.0.0.1:31700}{xpack.installed=true}]; nested: RemoteTransportException[[Cc1v1pt][127.0.0.1:31701][internal:index/shard/recovery/start_recovery]]; nested: RecoveryEngineException[Phase[1] prepare target for translog failed]; nested: RemoteTransportException[[iE97es_][127.0.0.1:31700][internal:index/shard/recovery/prepare_translog]]; nested: TranslogCorruptedException[translog from source [/data/soar/arkbase-2/data/nodes/0/indices/slCdH2pjSBSxLzozq31DTw/3/translog/translog-2.tlog] is corrupted, expected shard UUID [4d 41 6c 37 33 55 72 5f 51 42 53 73 6c 49 47 33 53 30 4b 67 4e 67] but got: [33 45 42 72 44 4a 72 51 51 47 43 6c 36 61 71 31 6a 46 34 68 35 67] this translog file belongs to a different translog]; ", 12 "last_allocation_status" : "no_attempt" 13 }, 14 "can_allocate" : "no", 15 "allocate_explanation" : "cannot allocate because allocation is not permitted to any of the nodes", 16 "node_allocation_decisions" : [ 17 { 18 "node_id" : "Cc1v1ptYRn-Bagl6cPrxzA", 19 "node_name" : "Cc1v1pt", 20 "transport_address" : "127.0.0.1:31701", 21 "node_attributes" : { 22 "xpack.installed" : "true" 23 }, 24 "node_decision" : "no", 25 "deciders" : [ 26 { 27 "decider" : "max_retry", 28 "decision" : "NO", 29 "explanation" : "shard has exceeded the maximum number of retries [5] on failed allocation attempts - manually call 
[/_cluster/reroute?retry_failed=true] to retry, [unassigned_info[[reason=ALLOCATION_FAILED], at[2022-10-13T10:04:15.030Z], failed_attempts[5], delayed=false, details[failed shard on node [iE97es_jRNm1I66PFE4ixQ]: failed recovery, failure RecoveryFailedException[[event20220429_v4][3]: Recovery failed from {Cc1v1pt}{Cc1v1ptYRn-Bagl6cPrxzA}{pnp55WokSdCNKTGTm4dSOA}{127.0.0.1}{127.0.0.1:31701}{xpack.installed=true} into {iE97es_}{iE97es_jRNm1I66PFE4ixQ}{c74MnsrwQL6av57bCvQBPg}{127.0.0.1}{127.0.0.1:31700}{xpack.installed=true}]; nested: RemoteTransportException[[Cc1v1pt][127.0.0.1:31701][internal:index/shard/recovery/start_recovery]]; nested: RecoveryEngineException[Phase[1] prepare target for translog failed]; nested: RemoteTransportException[[iE97es_][127.0.0.1:31700][internal:index/shard/recovery/prepare_translog]]; nested: TranslogCorruptedException[translog from source [/data/soar/arkbase-2/data/nodes/0/indices/slCdH2pjSBSxLzozq31DTw/3/translog/translog-2.tlog] is corrupted, expected shard UUID [4d 41 6c 37 33 55 72 5f 51 42 53 73 6c 49 47 33 53 30 4b 67 4e 67] but got: [33 45 42 72 44 4a 72 51 51 47 43 6c 36 61 71 31 6a 46 34 68 35 67] this translog file belongs to a different translog]; ], allocation_status[no_attempt]]]" 30 }, 31 { 32 "decider" : "same_shard", 33 "decision" : "NO", 34 "explanation" : "the shard cannot be allocated to the same node on which a copy of the shard already exists [[event20220429_v4][3], node[Cc1v1ptYRn-Bagl6cPrxzA], [P], s[STARTED], a[id=DMIteCRPQcaAj6Qnftv9EA]]" 35 } 36 ] 37 }, 38 { 39 "node_id" : "iE97es_jRNm1I66PFE4ixQ", 40 "node_name" : "iE97es_", 41 "transport_address" : "127.0.0.1:31700", 42 "node_attributes" : { 43 "xpack.installed" : "true" 44 }, 45 "node_decision" : "no", 46 "deciders" : [ 47 { 48 "decider" : "max_retry", 49 "decision" : "NO", 50 "explanation" : "shard has exceeded the maximum number of retries [5] on failed allocation attempts - manually call [/_cluster/reroute?retry_failed=true] to retry, 
[unassigned_info[[reason=ALLOCATION_FAILED], at[2022-10-13T10:04:15.030Z], failed_attempts[5], delayed=false, details[failed shard on node [iE97es_jRNm1I66PFE4ixQ]: failed recovery, failure RecoveryFailedException[[event20220429_v4][3]: Recovery failed from {Cc1v1pt}{Cc1v1ptYRn-Bagl6cPrxzA}{pnp55WokSdCNKTGTm4dSOA}{127.0.0.1}{127.0.0.1:31701}{xpack.installed=true} into {iE97es_}{iE97es_jRNm1I66PFE4ixQ}{c74MnsrwQL6av57bCvQBPg}{127.0.0.1}{127.0.0.1:31700}{xpack.installed=true}]; nested: RemoteTransportException[[Cc1v1pt][127.0.0.1:31701][internal:index/shard/recovery/start_recovery]]; nested: RecoveryEngineException[Phase[1] prepare target for translog failed]; nested: RemoteTransportException[[iE97es_][127.0.0.1:31700][internal:index/shard/recovery/prepare_translog]]; nested: TranslogCorruptedException[translog from source [/data/soar/arkbase-2/data/nodes/0/indices/slCdH2pjSBSxLzozq31DTw/3/translog/translog-2.tlog] is corrupted, expected shard UUID [4d 41 6c 37 33 55 72 5f 51 42 53 73 6c 49 47 33 53 30 4b 67 4e 67] but got: [33 45 42 72 44 4a 72 51 51 47 43 6c 36 61 71 31 6a 46 34 68 35 67] this translog file belongs to a different translog]; ], allocation_status[no_attempt]]]" 51 } 52 ] 53 } 54 ] 55 } 56 [root@soar ~]#
解决方法:重启节点后,该分片重新分配成功,问题解决。(Resolution: after restarting the node, the shard was reallocated successfully. The root cause shown in the log is a corrupted translog file — `TranslogCorruptedException: this translog file belongs to a different translog`. As the allocation explain output itself suggests, once the underlying cause is addressed you can also retry the failed allocation manually with `POST /_cluster/reroute?retry_failed=true`.)
标签:node,127.0.0.1,failed,translog,shard,索引,Elasticsearch,6.8 From: https://www.cnblogs.com/yimusidian/p/16789251.html