org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException: Datanode denied communication with namenode because hostname cannot be resolved (ip=172.17.0.1, hostname=172.17.0.1): DatanodeRegistration(0.0.0.0:50010, datanodeUuid=9b235124-3b0c-4bfc-bf8c-b421fcd1ee57, infoPort=50075, infoSecurePort=0, ipcPort=50020, storageInfo=lv=-56;cid=CID-0aa8d0a2-aad0-4e07-a361-36b926bc2a9d;nsid=888616734;c=0)
    at org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager.registerDatanode(DatanodeManager.java:863)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.registerDatanode(FSNamesystem.java:4528)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.registerDatanode(NameNodeRpcServer.java:1285)
    at org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB.registerDatanode(DatanodeProtocolServerSideTranslatorPB.java:96)
    at org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos$DatanodeProtocolService$2.callBlockingMethod(DatanodeProtocolProtos.java:28752)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:969)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2049)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2045)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2045)
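The namenode refuses the registration because it cannot reverse-resolve 172.17.0.1 (the default Docker bridge address) to a hostname. Two common remedies: give each container a stable, resolvable hostname (hostname: plus matching extra_hosts: entries in docker-compose.yml), or relax the namenode's reverse-DNS check. A minimal sketch of the latter, in the namenode's hdfs-site.xml (the property name is standard HDFS; whether disabling the check is acceptable depends on your network):

    <property>
      <!-- skip the reverse-DNS lookup on datanode registration; fine for a
           local sandbox, not recommended on a shared cluster -->
      <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
      <value>false</value>
    </property>

Restart the namenode after the change. A separate failure appeared when running hdfs namenode -format: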
17/05/19 17:15:52 ERROR namenode.NameNode: Failed to start namenode.
java.io.IOException: Cannot create directory /works/dfs/name/current
    at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.clearDirectory(Storage.java:337)
    at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:548)
    at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:569)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:161)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:991)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1429)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1554)
17/05/19 17:15:52 INFO util.ExitUtil: Exiting with status 1
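The format step cannot create /works/dfs/name/current, which usually means the parent directory is missing, is not writable by the user running the namenode, or (in Docker) is not actually mounted into the container. A minimal sketch of the fix, assuming dfs.namenode.name.dir points at /works/dfs/name and the namenode runs as user hdfs (both assumptions, adjust to your deployment):

    mkdir -p /works/dfs/name
    chown -R hdfs:hdfs /works/dfs      # skip if the namenode runs as root
    hdfs namenode -format

Note that re-formatting assigns a new clusterID, which matters for the next error.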
Ran: docker-compose exec spark-master jar cv0f /code/spark-libs.jar -C /root/spark/jars/ . to bundle the Spark jars.
The subsequent upload of spark-libs.jar to HDFS then errored:
put: File /user/spark/share/lib/spark-libs.jar.COPYING could only be replicated to 0 nodes instead of minReplication (=1). There are 0 datanode(s) running and no node(s) are excluded in this operation.
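This is the earlier registration problem surfacing at the client: HDFS cannot place even one replica because zero datanodes are live. If the namenode was also re-formatted in the meantime, datanodes with old storage are rejected for a clusterID mismatch. A hedged checklist, assuming the hdfs CLI is available inside the container and that this is a disposable dev cluster (wiping datanode storage is destructive):

    # how many live datanodes does the namenode see?
    hdfs dfsadmin -report
    # if the namenode was re-formatted, clear the datanode's storage so it can
    # re-register under the new clusterID (path = whatever dfs.datanode.data.dir
    # points at in your hdfs-site.xml)
    rm -rf /path/to/datanode/data/*
    hadoop-daemon.sh start datanode

Once dfsadmin -report shows at least one live datanode, retry the put.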