@@ -665,13 +665,42 @@ fn rolegroup_config_map(
                 .add("dfs.datanode.registered.hostname", "${env.POD_ADDRESS}")
                 .add("dfs.datanode.registered.port", "${env.DATA_PORT}")
                 .add("dfs.datanode.registered.ipc.port", "${env.IPC_PORT}")
+                // The following two properties are set to "true" because there is a minor chance that data
+                // written to HDFS is not synced to disk even if a block has been closed.
+                // Users in HBase can control this explicitly for the WAL, but for flushes and compactions
+                // I believe they can't as easily (if at all).
+                // In theory, HBase should be able to recover from these failures, but that comes at a cost
+                // and there's always a risk.
+                // Enabling this behavior causes HDFS to sync to disk as soon as possible.
                 .add("dfs.datanode.sync.behind.writes", "true")
                 .add("dfs.datanode.synconclose", "true")
+                // The default (4096) hasn't changed since 2009.
+                // Increase to 128k to allow for faster transfers.
                 .add("io.file.buffer.size", "131072")
+                // Defaults to 10 since at least 2011.
+                // This controls the number of concurrent client connections (DataNodes included)
+                // to the NameNode. Ideally we'd scale this with the number of DataNodes, but that would
+                // lead to restarts of the NameNode.
+                // Increasing it to 50 should lead to better performance due to more concurrency.
                 .add("dfs.namenode.handler.count", "50")
+                // Defaults to 10 since at least 2012.
+                // This controls the number of concurrent client connections to the DataNodes.
+                // We have no idea how many clients there may be, so it's hard to pick a good default.
+                // Increasing to 50 should lead to better performance due to more concurrency, especially
+                // with use-cases like HBase.
                 .add("dfs.datanode.handler.count", "50")
+                // The following two properties have defaulted to 2 and 4 respectively since around 2013.
+                // They control the maximum number of replication "jobs" the NameNode assigns to
+                // a DataNode in a single heartbeat.
+                // Increasing this number will increase network usage during replication events
+                // but can lead to faster recovery.
                 .add("dfs.namenode.replication.max-streams", "4")
                 .add("dfs.namenode.replication.max-streams-hard-limit", "8")
+                // Defaults to 4096 and hasn't changed since at least 2011.
+                // These are the threads used for actual data transfer, so they are not very CPU-heavy
+                // but IO-bound. This is why the default is already relatively high.
+                // Today's Java and IO should be able to handle more, so bump it to 8192 for
+                // better performance/concurrency.
                 .add("dfs.datanode.max.transfer.threads", "8192");
             if hdfs.has_https_enabled() {
                 hdfs_site.add("dfs.datanode.registered.https.port", "${env.HTTPS_PORT}");
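For illustration, here is a minimal, self-contained sketch of what a chained `.add(key, value)` builder like the one above conceptually accumulates, and how the tuned properties would look once rendered into Hadoop-style XML (hdfs-site.xml). The `HdfsSiteSketch` type and its `to_xml` helper are hypothetical stand-ins used only for this sketch, not the operator's actual ConfigMap-building API.

```rust
use std::collections::BTreeMap;

// Hypothetical stand-in for a config builder; the real operator type differs.
struct HdfsSiteSketch {
    // BTreeMap keeps the rendered output deterministic.
    properties: BTreeMap<String, String>,
}

impl HdfsSiteSketch {
    fn new() -> Self {
        Self {
            properties: BTreeMap::new(),
        }
    }

    // Mirrors the chained `.add(key, value)` style used in the diff above.
    fn add(&mut self, key: &str, value: &str) -> &mut Self {
        self.properties.insert(key.to_string(), value.to_string());
        self
    }

    // Render the accumulated properties as the <configuration> XML that HDFS
    // reads from hdfs-site.xml.
    fn to_xml(&self) -> String {
        let mut out = String::from("<configuration>\n");
        for (k, v) in &self.properties {
            out.push_str(&format!(
                "  <property>\n    <name>{k}</name>\n    <value>{v}</value>\n  </property>\n"
            ));
        }
        out.push_str("</configuration>\n");
        out
    }
}

fn main() {
    let mut hdfs_site = HdfsSiteSketch::new();
    hdfs_site
        .add("dfs.datanode.sync.behind.writes", "true")
        .add("dfs.datanode.synconclose", "true")
        .add("dfs.namenode.handler.count", "50")
        .add("dfs.datanode.handler.count", "50")
        .add("dfs.namenode.replication.max-streams", "4")
        .add("dfs.namenode.replication.max-streams-hard-limit", "8")
        .add("dfs.datanode.max.transfer.threads", "8192");
    println!("{}", hdfs_site.to_xml());
}
```

Running the sketch prints a `<configuration>` block containing the tuned values from the diff above, which is roughly what ends up in the rolegroup's hdfs-site.xml.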