[hadoop@bigdata01 hadoop]$ cat hadoop-env.sh | grep -C 10 PID
# export HADOOP_MOVER_OPTS=""

###
# Advanced Users Only!
###

# The directory where pid files are stored. /tmp by default.
# NOTE: this should be set to a directory that can only be written to by
#       the user that will run the hadoop daemons.  Otherwise there is the
#       potential for a symlink attack.
export HADOOP_PID_DIR=${HADOOP_PID_DIR}
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}

# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER
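Leaving HADOOP_PID_DIR at its /tmp default is risky: the OS may clean /tmp, after which the stop scripts can no longer find the daemon pid files. A minimal sketch of moving it to a dedicated directory (the path /home/hadoop/app/hadoop/pids is an assumed example, not from the cluster above):

# Create a pid directory writable only by the hadoop user (path is an assumption)
mkdir -p /home/hadoop/app/hadoop/pids
chmod 755 /home/hadoop/app/hadoop/pids

# Then in hadoop-env.sh:
export HADOOP_PID_DIR=/home/hadoop/app/hadoop/pids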
[hadoop@bigdata01 hadoop]$ hdfs dfsadmin
Usage: hdfs dfsadmin
Note: Administrative commands can only be run as the HDFS superuser.
        [-report [-live] [-dead] [-decommissioning]]
        [-safemode <enter | leave | get | wait>]
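As a quick sanity check, the -report option from the usage above prints cluster capacity and per-DataNode status; for example:

# Full cluster report (capacity, DFS used/remaining, per-DataNode details)
hdfs dfsadmin -report

# Restrict the report to DataNodes that are currently live
hdfs dfsadmin -report -live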
Use hdfs dfsadmin -safemode enter to put HDFS into safe mode.
Use hdfs dfsadmin -safemode leave to take it out of safe mode.
While in safe mode, HDFS can be read but not written to.
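A short sketch of the safe-mode subcommands from the dfsadmin usage above (get prints the current state; wait blocks until safe mode is off, which is useful in startup scripts):

# Enter safe mode manually, e.g. before maintenance
hdfs dfsadmin -safemode enter

# Check whether safe mode is currently on
hdfs dfsadmin -safemode get

# Reads still work in safe mode; writes will fail
hdfs dfs -ls /

# Allow writes again
hdfs dfsadmin -safemode leave

# In a script: block until the NameNode has left safe mode
hdfs dfsadmin -safemode wait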
HDFS disk check
hdfs fsck /
[hadoop@bigdata01 hadoop]$ hdfs fsck
Usage: DFSck <path> [-list-corruptfileblocks | [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]]] [-maintenance]
        <path>  start checking from this path
        -move   move corrupted files to /lost+found
        -delete delete corrupted files
        -files  print out files being checked
        -openforwrite   print out files opened for write
        -includeSnapshots       include snapshot data if the given path indicates a snapshottable directory or there are snapshottable directories under it
        -list-corruptfileblocks print out list of missing blocks and files they belong to
        -blocks print out block report
        -locations      print out locations for every block
        -racks  print out network topology for data-node locations
        -maintenance    print out maintenance state node details
        -blockId        print out which file this blockId belongs to, locations (nodes, racks) of this block, and other diagnostics info (under replicated, corrupted or not, etc)
[hadoop@bigdata01 hadoop]$ hdfs fsck /
19/12/05 22:25:44 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Connecting to namenode via http://bigdata01:50070/fsck?ugi=hadoop&path=%2F
FSCK started by hadoop (auth:SIMPLE) from /192.168.52.50 for path / at Thu Dec 05 22:25:45 CST 2019
.........Status: HEALTHY
 Total size:    515494717 B
 Total dirs:    13
 Total files:   9
 Total symlinks:                0
 Total blocks (validated):      11 (avg. block size 46863156 B)
 Minimally replicated blocks:   11 (100.0 %)
 Over-replicated blocks:        0 (0.0 %)
 Under-replicated blocks:       0 (0.0 %)
 Mis-replicated blocks:         0 (0.0 %)
 Default replication factor:    1
 Average block replication:     1.0
 Corrupt blocks:                0
 Missing replicas:              0 (0.0 %)
 Number of data-nodes:          1
 Number of racks:               1
FSCK ended at Thu Dec 05 22:25:45 CST 2019 in 16 milliseconds

The filesystem under path '/' is HEALTHY
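If fsck ever reports CORRUPT instead of HEALTHY, the options from the usage output above help narrow down the damage; a sketch (blk_1073741825 is a hypothetical block ID):

# List missing/corrupt blocks and the files they belong to
hdfs fsck / -list-corruptfileblocks

# Print every file with its blocks and the DataNodes holding each replica
hdfs fsck / -files -blocks -locations

# Trace one block ID back to its file and replica locations
# (blk_1073741825 is a placeholder)
hdfs fsck -blockId blk_1073741825

# Last resort: delete files whose blocks cannot be recovered
hdfs fsck / -delete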
[hadoop@bigdata01 logs]$ sh /home/hadoop/app/hadoop/sbin/start-balancer.sh
starting balancer, logging to /home/hadoop/app/hadoop-2.6.0-cdh5.16.2/logs/hadoop-hadoop-balancer-bigdata01.out
Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
Check the corresponding log file for the details:
[hadoop@bigdata01 logs]$ more hadoop-hadoop-balancer-bigdata01.log
2019-12-05 22:34:50,542 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: namenodes  = [hdfs://bigdata01:9000]
2019-12-05 22:34:50,544 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: parameters = Balancer.Parameters [BalancingPolicy.Node, threshold = 10.0, max idle iteration = 5, #excluded nodes = 0, #included nodes = 0, #source nodes = 0, run during upgrade = false]
2019-12-05 22:34:50,544 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: included nodes = []
2019-12-05 22:34:50,544 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: excluded nodes = []
2019-12-05 22:34:50,544 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: source nodes = []
2019-12-05 22:34:50,671 WARN org.apache.hadoop.util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2019-12-05 22:34:51,708 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: dfs.balancer.movedWinWidth = 5400000 (default=5400000)
2019-12-05 22:34:51,709 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: dfs.balancer.moverThreads = 1000 (default=1000)
2019-12-05 22:34:51,709 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: dfs.balancer.dispatcherThreads = 200 (default=200)
2019-12-05 22:34:51,709 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: dfs.datanode.balance.max.concurrent.moves = 50 (default=50)
2019-12-05 22:34:51,714 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: dfs.balancer.max-size-to-move = 10737418240 (default=10737418240)
2019-12-05 22:34:51,726 INFO org.apache.hadoop.net.NetworkTopology: Adding a new node: /default-rack/192.168.52.50:50010
2019-12-05 22:34:51,727 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: 0 over-utilized: []
2019-12-05 22:34:51,728 INFO org.apache.hadoop.hdfs.server.balancer.Balancer: 0 underutilized: []
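The log shows the default threshold of 10.0 (each DataNode's utilization may deviate up to 10 percentage points from the cluster average) and, on this single-node cluster, nothing to move. A sketch of the knobs typically tuned (the values below are illustrative assumptions):

# Balance more tightly: allow at most 5% deviation per DataNode
sh /home/hadoop/app/hadoop/sbin/start-balancer.sh -threshold 5

# Cap per-DataNode balancing bandwidth at ~10 MB/s so jobs are not starved
# (takes effect immediately, no DataNode restart needed)
hdfs dfsadmin -setBalancerBandwidth 10485760

# Stop a running balancer
sh /home/hadoop/app/hadoop/sbin/stop-balancer.sh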