Reputation: 163
I want the snapshot repository to connect to HDFS HA automatically when the NameNode switches from active to standby. Which URI should I use?
PUT _snapshot/my_hdfs_repository
{
  "type": "hdfs",
  "settings": {
    "uri": "hdfs://namenode:8020/",
    "path": "/user/elasticsearch/repositories"
  }
}
Until now, I have been changing the URI manually whenever the HDFS NameNode switches.
Upvotes: 0
Views: 442
Reputation: 76
These are my settings with HDFS HA and Kerberos enabled.
PUT /_snapshot/elastic_hdfs_repository
{
  "type" : "hdfs",
  "settings" : {
    "dfs" : {
      "http" : {
        "policy" : "HTTPS_ONLY"
      }
    },
    "path" : "/elasticsearch/repositories/elastic_hdfs_repository",
    "conf" : {
      "dfs" : {
        "client" : {
          "failover" : {
            "proxy" : {
              "provider" : {
                "my-cluster-nameservice" : "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
              }
            }
          }
        },
        "ha" : {
          "automatic-failover" : {
            "enabled" : {
              "my-cluster-nameservice" : "true"
            }
          },
          "namenodes" : {
            "my-cluster-nameservice" : "namenode1,namenode2"
          }
        },
        "data" : {
          "transfer" : {
            "protection" : "privacy"
          }
        },
        "namenode" : {
          "rpc-address" : {
            "my-cluster-nameservice" : {
              "namenode1" : "nn1.domain.com:8020",
              "namenode2" : "nn2.domain.com:8020"
            }
          }
        },
        "nameservices" : "my-cluster-nameservice"
      },
      "fs" : {
        "defaultFS" : "hdfs://elastic_hdfs_repository",
        "hdfs" : {
          "impl" : "org.apache.hadoop.hdfs.DistributedFileSystem"
        }
      },
      "hadoop.http.authentication.token.validity": 36000
    },
    "security" : {
      "principal" : "elasticsearch/[email protected]"
    },
    "uri" : "hdfs://my-cluster-nameservice"
  }
}
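If it helps, once the repository is registered you can check that Elasticsearch can reach the active NameNode through the nameservice with the repository verify API, and optionally try a test snapshot (the snapshot name snapshot_1 is just an example):

POST /_snapshot/elastic_hdfs_repository/_verify

PUT /_snapshot/elastic_hdfs_repository/snapshot_1?wait_for_completion=true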
Upvotes: 1