public class DataNode
extends org.apache.hadoop.conf.Configured
implements org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol, org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol, org.apache.hadoop.hdfs.protocol.FSConstants, java.lang.Runnable, org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean
Modifier and Type | Field and Description |
---|---|
org.apache.hadoop.hdfs.server.datanode.DataBlockScanner | blockScanner |
org.apache.hadoop.util.Daemon | blockScannerThread |
org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface | data |
static java.lang.String | DATA_DIR_KEY |
static java.lang.String | DATA_DIR_PERMISSION_KEY |
static java.lang.String | DN_CLIENTTRACE_FORMAT |
org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration | dnRegistration |
static java.lang.String | EMPTY_DEL_HINT |
org.apache.hadoop.ipc.Server | ipcServer |
static org.apache.commons.logging.Log | LOG |
org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol | namenode |
static int | PKT_HEADER_LEN: Header size for a packet. |
Fields inherited from interfaces org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol and org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol: versionID

Fields inherited from interface org.apache.hadoop.hdfs.protocol.FSConstants: BLOCKREPORT_INITIAL_DELAY, BLOCKREPORT_INTERVAL, BUFFER_SIZE, DEFAULT_BLOCK_SIZE, DEFAULT_DATA_SOCKET_SIZE, HEARTBEAT_INTERVAL, LAYOUT_VERSION, LEASE_HARDLIMIT_PERIOD, LEASE_RECOVER_PERIOD, LEASE_SOFTLIMIT_PERIOD, MAX_PATH_DEPTH, MAX_PATH_LENGTH, MIN_BLOCKS_FOR_WRITE, QUOTA_DONT_SET, QUOTA_RESET, SIZE_OF_INTEGER, SMALL_BUFFER_SIZE
Modifier and Type | Method and Description |
---|---|
protected void | checkDiskError(): Check if there is a disk failure and, if so, handle the error. |
protected void | checkDiskError(java.lang.Exception e): Check if the disk is out of space. |
static DataNode | createDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf): Instantiate and start a single datanode daemon and wait for it to finish. |
static DataNode | createDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources): Instantiate and start a single datanode daemon and wait for it to finish. |
static org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol | createInterDataNodeProtocolProxy(org.apache.hadoop.hdfs.protocol.DatanodeInfo info, org.apache.hadoop.conf.Configuration conf, int socketTimeout, boolean connectToDnViaHostname) |
static java.net.InetSocketAddress | createSocketAddr(java.lang.String target): Deprecated. |
java.lang.Long | getBalancerBandwidth(): Get the current value of the max balancer bandwidth in bytes per second. |
org.apache.hadoop.hdfs.protocol.Block | getBlockInfo(org.apache.hadoop.hdfs.protocol.Block block) |
org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo | getBlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.Block block, org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier> token) |
org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo | getBlockMetaDataInfo(org.apache.hadoop.hdfs.protocol.Block block) |
static DataNode | getDataNode(): Return the DataNode object. |
org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface | getFSDataset(): This method is used for testing. |
java.lang.String | getHostName() |
java.lang.String | getHttpPort() |
static java.net.InetSocketAddress | getInfoAddr(org.apache.hadoop.conf.Configuration conf): Determine the HTTP server's effective address. |
java.lang.String | getNamenode(): Return the namenode's identifier. |
java.net.InetSocketAddress | getNameNodeAddr() |
java.lang.String | getNamenodeAddress() |
long | getProtocolVersion(java.lang.String protocol, long clientVersion) |
java.lang.String | getRpcPort() |
java.net.InetSocketAddress | getSelfAddr() |
static java.net.InetSocketAddress | getStreamingAddr(org.apache.hadoop.conf.Configuration conf) |
java.lang.String | getVersion() |
java.lang.String | getVolumeInfo(): Returned information is a JSON representation of a map, with the volume name as the key and a map of volume attribute keys to their values as the value. |
int | getXceiverCount(): Number of concurrent xceivers per node. |
static DataNode | instantiateDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf): Instantiate a single datanode object. |
static DataNode | instantiateDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources): Instantiate a single datanode object. |
static void | main(java.lang.String[] args) |
static DataNode | makeInstance(java.lang.String[] dataDirs, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources): Make an instance of DataNode after ensuring that at least one of the given data directories (and their parent directories, if necessary) can be created. |
protected java.net.Socket | newSocket(): Creates either an NIO or a regular socket, depending on socketWriteTimeout. |
protected void | notifyNamenodeReceivedBlock(org.apache.hadoop.hdfs.protocol.Block block, java.lang.String delHint) |
void | offerService(): Main loop for the DataNode. |
org.apache.hadoop.hdfs.protocol.LocatedBlock | recoverBlock(org.apache.hadoop.hdfs.protocol.Block block, boolean keepLength, org.apache.hadoop.hdfs.protocol.DatanodeInfo[] targets) |
org.apache.hadoop.util.Daemon | recoverBlocks(org.apache.hadoop.hdfs.protocol.Block[] blocks, org.apache.hadoop.hdfs.protocol.DatanodeInfo[][] targets) |
void | run(): No matter what kind of exception is thrown, keep retrying offerService(). |
static void | runDatanodeDaemon(DataNode dn): Start a single datanode daemon and wait for it to finish. |
void | scheduleBlockReport(long delay): This method arranges for the datanode to send the block report at the next heartbeat. |
static void | secureMain(java.lang.String[] args, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources) |
static void | setNewStorageID(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration dnReg) |
void | shutdown(): Shut down this instance of the datanode. |
org.apache.hadoop.hdfs.server.protocol.BlockRecoveryInfo | startBlockRecovery(org.apache.hadoop.hdfs.protocol.Block block) |
java.lang.String | toString() |
void | unRegisterMXBean() |
void | updateBlock(org.apache.hadoop.hdfs.protocol.Block oldblock, org.apache.hadoop.hdfs.protocol.Block newblock, boolean finalize) |
public static final org.apache.commons.logging.Log LOG
public static final java.lang.String DN_CLIENTTRACE_FORMAT
public org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol namenode
public org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface data
public org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration dnRegistration
public static final java.lang.String EMPTY_DEL_HINT
public org.apache.hadoop.hdfs.server.datanode.DataBlockScanner blockScanner
public org.apache.hadoop.util.Daemon blockScannerThread
public static final java.lang.String DATA_DIR_KEY
public static final java.lang.String DATA_DIR_PERMISSION_KEY
public org.apache.hadoop.ipc.Server ipcServer
public static final int PKT_HEADER_LEN
@Deprecated public static java.net.InetSocketAddress createSocketAddr(java.lang.String target) throws java.io.IOException
Deprecated. Use NetUtils.createSocketAddr(String) instead.
Throws: java.io.IOException
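A minimal sketch of the recommended replacement, using org.apache.hadoop.net.NetUtils; the host:port target below is an illustrative placeholder:

```java
import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

public class SocketAddrSketch {
    public static void main(String[] args) {
        // Parse a host:port string into an InetSocketAddress, as the
        // deprecation note above recommends (hostname/port are examples).
        InetSocketAddress addr = NetUtils.createSocketAddr("datanode.example.com:50010");
        System.out.println(addr);
    }
}
```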
public void unRegisterMXBean()
public static java.net.InetSocketAddress getInfoAddr(org.apache.hadoop.conf.Configuration conf)
protected java.net.Socket newSocket() throws java.io.IOException
Throws: java.io.IOException
public static DataNode getDataNode()
public static org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol createInterDataNodeProtocolProxy(org.apache.hadoop.hdfs.protocol.DatanodeInfo info, org.apache.hadoop.conf.Configuration conf, int socketTimeout, boolean connectToDnViaHostname) throws java.io.IOException
Throws: java.io.IOException
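A hedged sketch of using this factory method to talk to a peer datanode; the DatanodeInfo and Block inputs are assumed to come from elsewhere (e.g. the namenode), and the timeout and hostname flag are illustrative:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;

public class InterDnProxySketch {
    // Fetch block metadata from a peer datanode over InterDatanodeProtocol.
    static BlockMetaDataInfo fetchBlockMetaData(
            DatanodeInfo info, Block block, Configuration conf) throws IOException {
        InterDatanodeProtocol proxy = DataNode.createInterDataNodeProtocolProxy(
                info, conf, 3000 /* socket timeout, ms (illustrative) */,
                false /* connect via IP rather than hostname */);
        return proxy.getBlockMetaDataInfo(block);
    }
}
```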
public java.net.InetSocketAddress getNameNodeAddr()
public java.net.InetSocketAddress getSelfAddr()
public java.lang.String getNamenode()
public static void setNewStorageID(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration dnReg)
public void shutdown()
protected void checkDiskError(java.lang.Exception e) throws java.io.IOException
Parameters: e - the exception that caused this checkDiskError call
Throws: java.io.IOException
protected void checkDiskError()
public int getXceiverCount()
Specified by: getXceiverCount in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean
public void offerService() throws java.lang.Exception
Throws: java.lang.Exception
protected void notifyNamenodeReceivedBlock(org.apache.hadoop.hdfs.protocol.Block block, java.lang.String delHint)
public void run()
Specified by: run in interface java.lang.Runnable
public static void runDatanodeDaemon(DataNode dn) throws java.io.IOException
Throws: java.io.IOException
public static DataNode instantiateDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf) throws java.io.IOException
This must be run by invoking runDatanodeDaemon(DataNode) subsequently.
Throws: java.io.IOException
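A minimal sketch of the two-step startup contract just described: instantiate the datanode, then hand it to runDatanodeDaemon(DataNode). The default Configuration and null check are assumptions; createDataNode (below) combines both steps in a single call.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class DataNodeLauncherSketch {
    public static void main(String[] args) throws Exception {
        // Picks up hdfs-site.xml etc. from the classpath.
        Configuration conf = new Configuration();
        // Step 1: instantiate a single datanode object (not yet running).
        DataNode dn = DataNode.instantiateDataNode(args, conf);
        if (dn != null) {
            // Step 2: start the daemon and wait for it to finish,
            // per the contract stated above.
            DataNode.runDatanodeDaemon(dn);
        }
    }
}
```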
public static DataNode instantiateDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources) throws java.io.IOException
This must be run by invoking runDatanodeDaemon(DataNode) subsequently.
Parameters: resources - Secure resources needed to run under Kerberos
Throws: java.io.IOException
public static DataNode createDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf) throws java.io.IOException
Throws: java.io.IOException
public static DataNode createDataNode(java.lang.String[] args, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources) throws java.io.IOException
Throws: java.io.IOException
public static DataNode makeInstance(java.lang.String[] dataDirs, org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources) throws java.io.IOException
Parameters:
dataDirs - List of directories where the new DataNode instance should keep its files.
conf - Configuration instance to use.
resources - Secure resources needed to run under Kerberos.
Throws: java.io.IOException
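A hedged sketch of calling makeInstance with the parameters just listed. The directory paths are illustrative, and passing null for resources assumes an insecure (non-Kerberos) deployment; that null is an assumption, not documented behavior.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class MakeInstanceSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative data directories; at least one (with its parents)
        // must be creatable, or makeInstance fails per the description above.
        String[] dataDirs = { "/data/1/dfs/dn", "/data/2/dfs/dn" };
        // null resources: assumed non-Kerberos setup.
        DataNode dn = DataNode.makeInstance(dataDirs, conf, null);
    }
}
```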
public java.lang.String toString()
Overrides: toString in class java.lang.Object
public void scheduleBlockReport(long delay)
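A short sketch of the method above, e.g. from a test harness that wants the block report sent at the next heartbeat; the zero delay is an assumption chosen to request it without extra waiting:

```java
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class BlockReportSketch {
    // Arrange for the datanode to send its block report at the
    // next heartbeat, with no additional scheduled delay.
    static void forceBlockReport(DataNode dn) {
        dn.scheduleBlockReport(0L);
    }
}
```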
public org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface getFSDataset()
public static void secureMain(java.lang.String[] args, org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources resources)
public static void main(java.lang.String[] args)
public org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo getBlockMetaDataInfo(org.apache.hadoop.hdfs.protocol.Block block) throws java.io.IOException
Specified by: getBlockMetaDataInfo in interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol
Throws: java.io.IOException
public org.apache.hadoop.hdfs.server.protocol.BlockRecoveryInfo startBlockRecovery(org.apache.hadoop.hdfs.protocol.Block block) throws java.io.IOException
Specified by: startBlockRecovery in interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol
Throws: java.io.IOException
public org.apache.hadoop.util.Daemon recoverBlocks(org.apache.hadoop.hdfs.protocol.Block[] blocks, org.apache.hadoop.hdfs.protocol.DatanodeInfo[][] targets)
public void updateBlock(org.apache.hadoop.hdfs.protocol.Block oldblock, org.apache.hadoop.hdfs.protocol.Block newblock, boolean finalize) throws java.io.IOException
Specified by: updateBlock in interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol
Throws: java.io.IOException
public long getProtocolVersion(java.lang.String protocol, long clientVersion) throws java.io.IOException
Specified by: getProtocolVersion in interface org.apache.hadoop.ipc.VersionedProtocol
Throws: java.io.IOException
public org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo getBlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.Block block, org.apache.hadoop.security.token.Token<org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier> token) throws java.io.IOException
Specified by: getBlockLocalPathInfo in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol
Throws: java.io.IOException
public org.apache.hadoop.hdfs.protocol.LocatedBlock recoverBlock(org.apache.hadoop.hdfs.protocol.Block block, boolean keepLength, org.apache.hadoop.hdfs.protocol.DatanodeInfo[] targets) throws java.io.IOException
Specified by: recoverBlock in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol
Throws: java.io.IOException
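A hedged sketch of invoking block recovery through the method above. The block and target datanodes are assumed inputs obtained elsewhere, and the reading of keepLength=true as "preserve the block's current length" is an interpretation of the flag name, not documented here:

```java
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class RecoverBlockSketch {
    // Synchronously recover a single block across the given targets.
    static LocatedBlock recover(DataNode dn, Block block, DatanodeInfo[] targets)
            throws IOException {
        return dn.recoverBlock(block, true /* keepLength, assumed semantics */, targets);
    }
}
```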
public org.apache.hadoop.hdfs.protocol.Block getBlockInfo(org.apache.hadoop.hdfs.protocol.Block block) throws java.io.IOException
Specified by: getBlockInfo in interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol
Throws: java.io.IOException
public static java.net.InetSocketAddress getStreamingAddr(org.apache.hadoop.conf.Configuration conf)
public java.lang.String getHostName()
Specified by: getHostName in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean
public java.lang.String getVersion()
Specified by: getVersion in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean
public java.lang.String getRpcPort()
Specified by: getRpcPort in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean
public java.lang.String getHttpPort()
Specified by: getHttpPort in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean
public java.lang.String getNamenodeAddress()
Specified by: getNamenodeAddress in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean
public java.lang.String getVolumeInfo()
Specified by: getVolumeInfo in interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean
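A short sketch of reading the volume report. The JSON shape in the comment follows the summary description of this method; the specific attribute keys shown are illustrative assumptions, not documented names:

```java
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class VolumeInfoSketch {
    static void printVolumeInfo(DataNode dn) {
        // Returns JSON mapping each volume name to a map of attribute
        // keys/values, e.g. (keys are illustrative):
        // {"/data/1/dfs/dn": {"freeSpace": 123456789, "usedSpace": 42}, ...}
        System.out.println(dn.getVolumeInfo());
    }
}
```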
public java.lang.Long getBalancerBandwidth()