View Javadoc

1   /*
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  
19  package org.apache.hadoop.hbase.chaos.actions;
20  
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
34  
35  /**
36   * Action that restarts a random datanode.
37   */
38  public class RestartRandomDataNodeAction extends RestartActionBaseAction {
39    private static final Logger LOG = LoggerFactory.getLogger(RestartRandomDataNodeAction.class);
40  
41    public RestartRandomDataNodeAction(long sleepTime) {
42      super(sleepTime);
43    }
44  
45    @Override protected Logger getLogger() {
46      return LOG;
47    }
48  
49    @Override
50    public void perform() throws Exception {
51      getLogger().info("Performing action: Restart random data node");
52      ServerName server = PolicyBasedChaosMonkey.selectRandomItem(getDataNodes());
53      restartDataNode(server, sleepTime);
54    }
55  
56    public ServerName[] getDataNodes() throws IOException {
57      DistributedFileSystem fs = (DistributedFileSystem) FSUtils.getRootDir(getConf())
58          .getFileSystem(getConf());
59      DFSClient dfsClient = fs.getClient();
60      List<ServerName> hosts = new LinkedList<ServerName>();
61      for (DatanodeInfo dataNode: dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE)) {
62        hosts.add(ServerName.valueOf(dataNode.getHostName(), -1, -1));
63      }
64      return hosts.toArray(new ServerName[0]);
65    }
66  }