1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.chaos.actions;
20
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
34
35
36
37
38 public class RestartRandomDataNodeAction extends RestartActionBaseAction {
39 private static final Logger LOG = LoggerFactory.getLogger(RestartRandomDataNodeAction.class);
40
41 public RestartRandomDataNodeAction(long sleepTime) {
42 super(sleepTime);
43 }
44
45 @Override protected Logger getLogger() {
46 return LOG;
47 }
48
49 @Override
50 public void perform() throws Exception {
51 getLogger().info("Performing action: Restart random data node");
52 ServerName server = PolicyBasedChaosMonkey.selectRandomItem(getDataNodes());
53 restartDataNode(server, sleepTime);
54 }
55
56 public ServerName[] getDataNodes() throws IOException {
57 DistributedFileSystem fs = (DistributedFileSystem) FSUtils.getRootDir(getConf())
58 .getFileSystem(getConf());
59 DFSClient dfsClient = fs.getClient();
60 List<ServerName> hosts = new LinkedList<ServerName>();
61 for (DatanodeInfo dataNode: dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE)) {
62 hosts.add(ServerName.valueOf(dataNode.getHostName(), -1, -1));
63 }
64 return hosts.toArray(new ServerName[0]);
65 }
66 }