1.数据节点储存 org.apache.hadoop.hdfs.server.datanode.DataStorage
public class DataStorage extends Storage {
convertMetatadataFileName
linkBlocks(File, File, int, HardLink)
corruptPreUpgradeStorage
doFinalize(StorageDirectory)
doRollback(StorageDirectory, NamespaceInfo)
doTransition(StorageDirectory, NamespaceInfo, StartupOption)
doUpgrade(StorageDirectory, NamespaceInfo)
finalizeUpgrade()
format(StorageDirectory, NamespaceInfo)
getFields(Properties, StorageDirectory)
getStorageID()
isConversionNeeded(StorageDirectory)
recoverTransitionRead(NamespaceInfo, Collection, StartupOption)
setFields(Properties, StorageDirectory)
setStorageID(String)
verifyDistributedUpgradeProgress
}
其中
public abstract class Storage extends StorageInfo {
}
public class StorageInfo {
}
2.文件系统数据集org.apache.hadoop.hdfs.server.datanode.FSDataset
public class FSDataset implements FSConstants, FSDatasetInterface {
findMetaFile(File)
getCauseIfDiskError(IOException)
getGenerationStampFromFile
getMetaFile(File, Block)
getMetaFileName(String, long)
parseGenerationStamp(File, File)
truncateBlock(File, File, long, long)
updateBlockMap(Map, Block, Block)
checkDataDir()
createBlockWriteStreams(File, File)
createTmpFile(FSVolume, Block, boolean)
delBlockFromDisk(File, File, Block)
detachBlock(Block, int)
finalizeBlock(Block)
finalizeBlockIfNeeded(Block)
finalizeBlockInternal(Block, boolean)
findBlockFile(long)
getActiveThreads(Block)
getBlockFile(Block)
getBlockInputStream(Block)
getBlockInputStream(Block, long)
getBlockLocalPathInfo(Block)
getBlockReport()
getBlocksBeingWrittenReport
getCapacity()
getChannelPosition(Block, BlockWriteStreams)
getDfsUsed()
getFile(Block)
getLength(Block)
getMetaDataInputStream(Block)
getMetaDataLength(Block)
getMetaFile(Block)
getRemaining()
getStorageInfo()
getStoredBlock(long)
getTmpInputStreams(Block, long, long)
getVisibleLength(Block)
getVolumeInfo()
hasEnoughResource()
interruptAndJoinThreads(List)
invalidate(Block[])
isFinalized(Block)
isValidBlock(Block)
metaFileExists(Block)
registerMBean(String)
setChannelPosition(Block, BlockWriteStreams, long, long)
setVisibleLength(Block, long)
shutdown()
startBlockRecovery(long)
toString()
tryUpdateBlock(Block, Block)
unfinalizeBlock(Block)
updateBlock(Block, Block)
validateBlockFile(Block)
validateBlockMetadata(Block)
writeToBlock(Block, boolean, boolean)
}
3.流式接口 org.apache.hadoop.hdfs.server.datanode.DataXceiverServer
class DataXceiverServer implements Runnable, FSConstants {
}
class DataXceiver implements Runnable, FSConstants {
copyBlock(DataInputStream)
getBlockChecksum(DataInputStream)
readBlock(DataInputStream)
replaceBlock(DataInputStream)
run()
sendResponse(Socket, short, long)
}
4.作为整体DataNode:org.apache.hadoop.hdfs.server.datanode.DataNode
public class DataNode extends Configured {
createDataNode(String[], Configuration)
createDataNode(String[], Configuration, SecureResources)
createInterDataNodeProtocolProxy
createSocketAddr(String)
getDataNode()
getInfoAddr(Configuration)
getStartupOption(Configuration)
getStreamingAddr(Configuration)
instantiateDataNode(String[], Configuration)
instantiateDataNode(String[], Configuration, SecureResources)
isDatanodeUp(DataNode)
logRecoverBlock(String, Block, DatanodeID[])
main(String[])
makeInstance(String[], Configuration, SecureResources)
now()
parseArguments(String[], Configuration)
printUsage()
runDatanodeDaemon(DataNode)
secureMain(String[], SecureResources)
setNewStorageID(DatanodeRegistration)
setStartupOption(Configuration, StartupOption)
checkBlockLocalPathAccess
checkBlockToken(Block, AccessMode)
checkBlockToken(Block, Token, AccessMode)
checkDiskError()
checkDiskError(Exception)
checkKerberosAuthMethod(String)
delayBeforeBlockReceived
getBalancerBandwidth()
getBlockInfo(Block)
getBlockLocalPathInfo(Block, Token)
getBlockMetaDataInfo(Block)
getFSDataset()
getHostName()
getHttpPort()
getMetrics()
getNamenode()
getNameNodeAddr()
getNamenodeAddress()
getProtocolVersion(String, long)
getRpcPort()
getSelfAddr()
getVersion()
getVolumeInfo()
getXceiverCount()
handleDiskError(String)
handshake()
join()
newSocket()
notifyNamenode(int, String)
notifyNamenodeReceivedBlock
offerService()
processCommand(DatanodeCommand)
processCommand(DatanodeCommand[])
processDistributedUpgradeCommand
recoverBlock(Block, boolean, DatanodeInfo[])
recoverBlock(Block, boolean, DatanodeInfo[], boolean)
recoverBlocks(Block[], DatanodeInfo[][])
register()
registerMXBean(Configuration)
run()
scheduleBlockReport(long)
shutdown()
startBlockRecovery(Block)
startDataNode(Configuration, AbstractList, SecureResources)
startDistributedUpgradeIfNeeded
syncBlock(Block, List, DatanodeInfo[], boolean)
toString()
transferBlock(Block, DatanodeInfo[])
transferBlocks(Block[], DatanodeInfo[][])
unRegisterMXBean()
updateBlock(Block, Block, boolean)
}