View Javadoc

1   /*
2    * Licensed to the Apache Software Foundation (ASF) under one or more
3    * contributor license agreements. See the NOTICE file distributed with this
4    * work for additional information regarding copyright ownership. The ASF
5    * licenses this file to you under the Apache License, Version 2.0 (the
6    * "License"); you may not use this file except in compliance with the License.
7    * You may obtain a copy of the License at
8    *
9    * http://www.apache.org/licenses/LICENSE-2.0
10   *
11   * Unless required by applicable law or agreed to in writing, software
12   * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13   * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14   * License for the specific language governing permissions and limitations
15   * under the License.
16   */
17  package org.apache.hadoop.hbase.util;
18  
19  import java.io.BufferedReader;
20  import java.io.BufferedWriter;
21  import java.io.File;
22  import java.io.FileInputStream;
23  import java.io.FileNotFoundException;
24  import java.io.FileWriter;
25  import java.io.FilenameFilter;
26  import java.io.IOException;
27  import java.io.InputStreamReader;
28  import java.io.PrintStream;
29  import java.util.ArrayList;
30  import java.util.Collections;
31  import java.util.HashMap;
32  import java.util.HashSet;
33  import java.util.List;
34  import java.util.Map;
35  import java.util.Scanner;
36  import java.util.Set;
37  import java.util.TreeMap;
38  import java.util.regex.Matcher;
39  import java.util.regex.Pattern;
40  
41  import org.apache.commons.io.FileUtils;
42  import org.apache.commons.logging.Log;
43  import org.apache.commons.logging.LogFactory;
44  import org.apache.hadoop.conf.Configuration;
45  import org.apache.hadoop.hbase.HBaseTestingUtility;
46  import org.apache.hadoop.hbase.HConstants;
47  import org.apache.hadoop.hbase.testclassification.LargeTests;
48  import org.apache.hadoop.hbase.MiniHBaseCluster;
49  import org.apache.hadoop.hbase.TableName;
50  import org.apache.hadoop.hbase.client.HTable;
51  import org.apache.hadoop.hbase.zookeeper.ZKUtil;
52  import org.apache.hadoop.hdfs.MiniDFSCluster;
53  import org.junit.experimental.categories.Category;
54  
55  /**
56   * A helper class for process-based mini-cluster tests. Unlike
57   * {@link MiniHBaseCluster}, starts daemons as separate processes, allowing to
58   * do real kill testing.
59   */
60  @Category(LargeTests.class)
61  public class ProcessBasedLocalHBaseCluster {
62  
63    private final String hbaseHome, workDir;
64    private final Configuration conf;
65    private final int numMasters, numRegionServers, numDataNodes;
66    private final List<Integer> rsPorts, masterPorts;
67  
68    private final int zkClientPort;
69  
70    private static final int MAX_FILE_SIZE_OVERRIDE = 10 * 1000 * 1000;
71  
72    private static final Log LOG = LogFactory.getLog(
73        ProcessBasedLocalHBaseCluster.class);
74  
75    private List<String> daemonPidFiles =
76        Collections.synchronizedList(new ArrayList<String>());;
77  
78    private boolean shutdownHookInstalled;
79  
80    private String hbaseDaemonScript;
81  
82    private MiniDFSCluster dfsCluster;
83  
84    private HBaseTestingUtility testUtil;
85  
86    private Thread logTailerThread;
87  
88    private List<String> logTailDirs = Collections.synchronizedList(new ArrayList<String>());
89  
  /**
   * The daemon kinds this cluster can run. {@code fullName} is the argument
   * passed to bin/hbase-daemon.sh and the suffix used in pid file names.
   */
  private static enum ServerType {
    MASTER("master"),
    RS("regionserver"),
    ZK("zookeeper");

    // Daemon name as understood by hbase-daemon.sh (see startServer and
    // pidFilePath).
    private final String fullName;

    private ServerType(String fullName) {
      this.fullName = fullName;
    }
  }
101 
  /**
   * Constructor. Modifies the passed configuration: points it at a localhost
   * ZooKeeper quorum on a randomly chosen free client port. Always uses
   * exactly one master.
   * @param conf the {@link Configuration} to use
   * @param numDataNodes the number of data nodes
   * @param numRegionServers the number of region servers
   */
  public ProcessBasedLocalHBaseCluster(Configuration conf,
      int numDataNodes, int numRegionServers) {
    this.conf = conf;
    this.hbaseHome = HBaseHomePath.getHomePath();
    this.numMasters = 1;
    this.numRegionServers = numRegionServers;
    // All daemon working directories live under the build output tree.
    this.workDir = hbaseHome + "/target/local_cluster";
    this.numDataNodes = numDataNodes;

    hbaseDaemonScript = hbaseHome + "/bin/hbase-daemon.sh";
    zkClientPort = HBaseTestingUtility.randomFreePort();

    this.rsPorts = sortedPorts(numRegionServers);
    this.masterPorts = sortedPorts(numMasters);

    conf.set(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
  }
126 
  /**
   * Makes this local HBase cluster use a mini-DFS cluster. Must be called
   * before {@link #startHBase()}.
   * @throws Exception if the mini-DFS cluster fails to start
   */
  public void startMiniDFS() throws Exception {
    if (testUtil == null) {
      // Lazily created so callers that never use DFS pay nothing.
      testUtil = new HBaseTestingUtility(conf);
    }
    dfsCluster = testUtil.startMiniDFSCluster(numDataNodes);
  }
138 
139   /**
140    * Generates a list of random port numbers in the sorted order. A sorted
141    * order makes sense if we ever want to refer to these servers by their index
142    * in the returned array, e.g. server #0, #1, etc.
143    */
144   private static List<Integer> sortedPorts(int n) {
145     List<Integer> ports = new ArrayList<Integer>(n);
146     for (int i = 0; i < n; ++i) {
147       ports.add(HBaseTestingUtility.randomFreePort());
148     }
149     Collections.sort(ports);
150     return ports;
151   }
152 
153   public void startHBase() throws IOException {
154     startDaemonLogTailer();
155     cleanupOldState();
156 
157     // start ZK
158     LOG.info("Starting ZooKeeper on port " + zkClientPort);
159     startZK();
160 
161     HBaseTestingUtility.waitForHostPort(HConstants.LOCALHOST, zkClientPort);
162 
163     for (int masterPort : masterPorts) {
164       startMaster(masterPort);
165     }
166 
167     ZKUtil.waitForBaseZNode(conf);
168 
169     for (int rsPort : rsPorts) {
170       startRegionServer(rsPort);
171     }
172 
173     LOG.info("Waiting for HBase startup by scanning META");
174     int attemptsLeft = 10;
175     while (attemptsLeft-- > 0) {
176       try {
177         new HTable(conf, TableName.META_TABLE_NAME);
178       } catch (Exception e) {
179         LOG.info("Waiting for HBase to startup. Retries left: " + attemptsLeft,
180             e);
181         Threads.sleep(1000);
182       }
183     }
184 
185     LOG.info("Process-based HBase Cluster with " + numRegionServers +
186         " region servers up and running... \n\n");
187   }
188 
189   public void startRegionServer(int port) {
190     startServer(ServerType.RS, port);
191   }
192 
193   public void startMaster(int port) {
194     startServer(ServerType.MASTER, port);
195   }
196 
197   public void killRegionServer(int port) throws IOException {
198     killServer(ServerType.RS, port);
199   }
200 
201   public void killMaster() throws IOException {
202     killServer(ServerType.MASTER, 0);
203   }
204 
205   public void startZK() {
206     startServer(ServerType.ZK, 0);
207   }
208 
209   private void executeCommand(String command) {
210     executeCommand(command, null);
211   }
212 
213   private void executeCommand(String command, Map<String,
214       String> envOverrides) {
215     ensureShutdownHookInstalled();
216     LOG.debug("Command : " + command);
217 
218     try {
219       String [] envp = null;
220       if (envOverrides != null) {
221         Map<String, String> map = new HashMap<String, String>(
222             System.getenv());
223         map.putAll(envOverrides);
224         envp = new String[map.size()];
225         int idx = 0;
226         for (Map.Entry<String, String> e: map.entrySet()) {
227           envp[idx++] = e.getKey() + "=" + e.getValue();
228         }
229       }
230 
231       Process p = Runtime.getRuntime().exec(command, envp);
232 
233       BufferedReader stdInput = new BufferedReader(
234           new InputStreamReader(p.getInputStream()));
235       BufferedReader stdError = new BufferedReader(
236           new InputStreamReader(p.getErrorStream()));
237 
238       // read the output from the command
239       String s = null;
240       while ((s = stdInput.readLine()) != null) {
241         System.out.println(s);
242       }
243 
244       // read any errors from the attempted command
245       while ((s = stdError.readLine()) != null) {
246         System.out.println(s);
247       }
248     } catch (IOException e) {
249       LOG.error("Error running: " + command, e);
250     }
251   }
252 
253   private void shutdownAllProcesses() {
254     LOG.info("Killing daemons using pid files");
255     final List<String> pidFiles = new ArrayList<String>(daemonPidFiles);
256     for (String pidFile : pidFiles) {
257       int pid = 0;
258       try {
259         pid = readPidFromFile(pidFile);
260       } catch (IOException ex) {
261         LOG.error("Could not read pid from file " + pidFile);
262       }
263 
264       if (pid > 0) {
265         LOG.info("Killing pid " + pid + " (" + pidFile + ")");
266         killProcess(pid);
267       }
268     }
269   }
270 
271   private void ensureShutdownHookInstalled() {
272     if (shutdownHookInstalled) {
273       return;
274     }
275 
276     Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
277       @Override
278       public void run() {
279         shutdownAllProcesses();
280       }
281     }));
282 
283     shutdownHookInstalled = true;
284   }
285 
  /** Removes any state left in the work directory by a previous cluster run. */
  private void cleanupOldState() {
    executeCommand("rm -rf " + workDir);
  }
289 
290   private void writeStringToFile(String s, String fileName) {
291     try {
292       BufferedWriter out = new BufferedWriter(new FileWriter(fileName));
293       out.write(s);
294       out.close();
295     } catch (IOException e) {
296       LOG.error("Error writing to: " + fileName, e);
297     }
298   }
299 
300   private String serverWorkingDir(ServerType serverType, int port) {
301     return workDir + "/" + serverType + "-" + port;
302   }
303 
  /** Reads the pid of the given daemon from its pid file. */
  private int getServerPID(ServerType serverType, int port) throws IOException {
    String pidFile = pidFilePath(serverType, port);
    return readPidFromFile(pidFile);
  }
308 
309   private static int readPidFromFile(String pidFile) throws IOException {
310     Scanner scanner = new Scanner(new File(pidFile));
311     try {
312       return scanner.nextInt();
313     } finally {
314       scanner.close();
315     }
316   }
317 
318   private String pidFilePath(ServerType serverType, int port) {
319     String dir = serverWorkingDir(serverType, port);
320     String user = System.getenv("USER");
321     String pidFile = String.format("%s/hbase-%s-%s.pid",
322                                    dir, user, serverType.fullName);
323     return pidFile;
324   }
325 
  /**
   * Kills the daemon of the given type/port using the pid from its pid file.
   * Does nothing if the pid read is not positive.
   */
  private void killServer(ServerType serverType, int port) throws IOException {
    int pid = getServerPID(serverType, port);
    if (pid > 0) {
      LOG.info("Killing " + serverType + "; pid=" + pid);
      killProcess(pid);
    }
  }

  /** Sends SIGKILL to the given pid via a shell command. */
  private void killProcess(int pid) {
    String cmd = "kill -s KILL " + pid;
    executeCommand(cmd);
  }
338 
339   private void startServer(ServerType serverType, int rsPort) {
340     // create working directory for this region server.
341     String dir = serverWorkingDir(serverType, rsPort);
342     String confStr = generateConfig(serverType, rsPort, dir);
343     LOG.debug("Creating directory " + dir);
344     new File(dir).mkdirs();
345 
346     writeStringToFile(confStr, dir + "/hbase-site.xml");
347 
348     // Set debug options to an empty string so that hbase-config.sh does not configure them
349     // using default ports. If we want to run remote debugging on process-based local cluster's
350     // daemons, we can automatically choose non-conflicting JDWP and JMX ports for each daemon
351     // and specify them here.
352     writeStringToFile(
353         "unset HBASE_MASTER_OPTS\n" +
354         "unset HBASE_REGIONSERVER_OPTS\n" +
355         "unset HBASE_ZOOKEEPER_OPTS\n" +
356         "HBASE_MASTER_DBG_OPTS=' '\n" +
357         "HBASE_REGIONSERVER_DBG_OPTS=' '\n" +
358         "HBASE_ZOOKEEPER_DBG_OPTS=' '\n" +
359         "HBASE_MASTER_JMX_OPTS=' '\n" +
360         "HBASE_REGIONSERVER_JMX_OPTS=' '\n" +
361         "HBASE_ZOOKEEPER_JMX_OPTS=' '\n",
362         dir + "/hbase-env.sh");
363 
364     Map<String, String> envOverrides = new HashMap<String, String>();
365     envOverrides.put("HBASE_LOG_DIR", dir);
366     envOverrides.put("HBASE_PID_DIR", dir);
367     try {
368       FileUtils.copyFile(
369           new File(hbaseHome, "conf/log4j.properties"),
370           new File(dir, "log4j.properties"));
371     } catch (IOException ex) {
372       LOG.error("Could not install log4j.properties into " + dir);
373     }
374 
375     executeCommand(hbaseDaemonScript + " --config " + dir +
376                    " start " + serverType.fullName, envOverrides);
377     daemonPidFiles.add(pidFilePath(serverType, rsPort));
378     logTailDirs.add(dir);
379   }
380 
381   private final String generateConfig(ServerType serverType, int rpcPort,
382       String daemonDir) {
383     StringBuilder sb = new StringBuilder();
384     Map<String, Object> confMap = new TreeMap<String, Object>();
385     confMap.put(HConstants.CLUSTER_DISTRIBUTED, true);
386 
387     if (serverType == ServerType.MASTER) {
388       confMap.put(HConstants.MASTER_PORT, rpcPort);
389 
390       int masterInfoPort = HBaseTestingUtility.randomFreePort();
391       reportWebUIPort("master", masterInfoPort);
392       confMap.put(HConstants.MASTER_INFO_PORT, masterInfoPort);
393     } else if (serverType == ServerType.RS) {
394       confMap.put(HConstants.REGIONSERVER_PORT, rpcPort);
395 
396       int rsInfoPort = HBaseTestingUtility.randomFreePort();
397       reportWebUIPort("region server", rsInfoPort);
398       confMap.put(HConstants.REGIONSERVER_INFO_PORT, rsInfoPort);
399     } else {
400       confMap.put(HConstants.ZOOKEEPER_DATA_DIR, daemonDir);
401     }
402 
403     confMap.put(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
404     confMap.put(HConstants.HREGION_MAX_FILESIZE, MAX_FILE_SIZE_OVERRIDE);
405 
406     if (dfsCluster != null) {
407       String fsURL = "hdfs://" + HConstants.LOCALHOST + ":" + dfsCluster.getNameNodePort();
408       confMap.put("fs.defaultFS", fsURL);
409       confMap.put("hbase.rootdir", fsURL + "/hbase_test");
410     }
411 
412     sb.append("<configuration>\n");
413     for (Map.Entry<String, Object> entry : confMap.entrySet()) {
414       sb.append("  <property>\n");
415       sb.append("    <name>" + entry.getKey() + "</name>\n");
416       sb.append("    <value>" + entry.getValue() + "</value>\n");
417       sb.append("  </property>\n");
418     }
419     sb.append("</configuration>\n");
420     return sb.toString();
421   }
422 
423   private static void reportWebUIPort(String daemon, int port) {
424     LOG.info("Local " + daemon + " web UI is at http://"
425         + HConstants.LOCALHOST + ":" + port);
426   }
427 
  /** Returns the configuration this cluster was created with (and modified). */
  public Configuration getConf() {
    return conf;
  }

  /** Shuts down the mini-DFS cluster (if started) and kills all daemons. */
  public void shutdown() {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    shutdownAllProcesses();
  }
438 
439   private static final Pattern TO_REMOVE_FROM_LOG_LINES_RE =
440       Pattern.compile("org\\.apache\\.hadoop\\.hbase\\.");
441 
442   private static final Pattern LOG_PATH_FORMAT_RE =
443       Pattern.compile("^.*/([A-Z]+)-(\\d+)/[^/]+$");
444 
445   private static String processLine(String line) {
446     Matcher m = TO_REMOVE_FROM_LOG_LINES_RE.matcher(line);
447     return m.replaceAll("");
448   }
449 
450   private final class LocalDaemonLogTailer implements Runnable {
451     private final Set<String> tailedFiles = new HashSet<String>();
452     private final List<String> dirList = new ArrayList<String>();
453     private final Object printLock = new Object();
454 
455     private final FilenameFilter LOG_FILES = new FilenameFilter() {
456       @Override
457       public boolean accept(File dir, String name) {
458         return name.endsWith(".out") || name.endsWith(".log");
459       }
460     };
461 
462     @Override
463     public void run() {
464       try {
465         runInternal();
466       } catch (IOException ex) {
467         LOG.error(ex);
468       }
469     }
470 
471     private void runInternal() throws IOException {
472       Thread.currentThread().setName(getClass().getSimpleName());
473       while (true) {
474         scanDirs();
475         try {
476           Thread.sleep(500);
477         } catch (InterruptedException e) {
478           LOG.error("Log tailer thread interrupted", e);
479           break;
480         }
481       }
482     }
483 
484     private void scanDirs() throws FileNotFoundException {
485       dirList.clear();
486       dirList.addAll(logTailDirs);
487       for (String d : dirList) {
488         File[] files = new File(d).listFiles(LOG_FILES);
489         if (files != null) {
490           for (File f : files) {
491             String filePath = f.getAbsolutePath();
492             if (!tailedFiles.contains(filePath)) {
493               tailedFiles.add(filePath);
494               startTailingFile(filePath);
495             }
496           }
497         }
498       }
499     }
500 
501     private void startTailingFile(final String filePath) throws FileNotFoundException {
502       final PrintStream dest = filePath.endsWith(".log") ? System.err : System.out;
503       final ServerType serverType;
504       final int serverPort;
505       Matcher m = LOG_PATH_FORMAT_RE.matcher(filePath);
506       if (m.matches()) {
507         serverType = ServerType.valueOf(m.group(1));
508         serverPort = Integer.valueOf(m.group(2));
509       } else {
510         LOG.error("Unrecognized log path format: " + filePath);
511         return;
512       }
513       final String logMsgPrefix =
514           "[" + serverType + (serverPort != 0 ? ":" + serverPort : "") + "] ";
515 
516       LOG.debug("Tailing " + filePath);
517       Thread t = new Thread(new Runnable() {
518         @Override
519         public void run() {
520           try {
521             FileInputStream fis = new FileInputStream(filePath);
522             BufferedReader br = new BufferedReader(new InputStreamReader(fis));
523             String line;
524             while (true) {
525               try {
526                 Thread.sleep(200);
527               } catch (InterruptedException e) {
528                 LOG.error("Tailer for " + filePath + " interrupted");
529                 break;
530               }
531               while ((line = br.readLine()) != null) {
532                 line = logMsgPrefix + processLine(line);
533                 synchronized (printLock) {
534                   if (line.endsWith("\n")) {
535                     dest.print(line);
536                   } else {
537                     dest.println(line);
538                   }
539                   dest.flush();
540                 }
541               }
542             }
543           } catch (IOException ex) {
544             LOG.error("Failed tailing " + filePath, ex);
545           }
546         }
547       });
548       t.setDaemon(true);
549       t.setName("Tailer for " + filePath);
550       t.start();
551     }
552 
553   }
554 
  /** Starts the background daemon thread that tails all daemon log files. */
  private void startDaemonLogTailer() {
    logTailerThread = new Thread(new LocalDaemonLogTailer());
    logTailerThread.setDaemon(true);
    logTailerThread.start();
  }

}
562