1
2
3
4
5
6
7
8
9
10
11 package org.apache.hadoop.hbase.io.encoding;
12
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.util.Bytes;
22
23 @InterfaceAudience.Private
24 public class RowIndexEncoderV1 {
25 private static final Log LOG = LogFactory.getLog(RowIndexEncoderV1.class);
26
27
28 private Cell lastCell = null;
29
30 private DataOutputStream out;
31 private NoneEncoder encoder;
32 private int startOffset = -1;
33 private ByteArrayOutputStream rowsOffsetBAOS = new ByteArrayOutputStream(
34 64 * 4);
35
36 public RowIndexEncoderV1(DataOutputStream out,
37 HFileBlockDefaultEncodingContext encodingCtx) {
38 this.out = out;
39 this.encoder = new NoneEncoder(out, encodingCtx);
40 }
41
42 public int write(Cell cell) throws IOException {
43
44 if (!checkRow(cell)) {
45 if (startOffset < 0) {
46 startOffset = out.size();
47 }
48 rowsOffsetBAOS.writeInt(out.size() - startOffset);
49 }
50 lastCell = cell;
51 return encoder.write(cell);
52 }
53
54 protected boolean checkRow(final Cell cell) throws IOException {
55 boolean isDuplicateRow = false;
56 if (cell == null) {
57 throw new IOException("Key cannot be null or empty");
58 }
59 if (lastCell != null) {
60 int keyComp = KeyValue.COMPARATOR.compareRows(lastCell, cell);
61 if (keyComp > 0) {
62 throw new IOException("Added a key not lexically larger than"
63 + " previous. Current cell = " + cell + ", lastCell = " + lastCell);
64 } else if (keyComp == 0) {
65 isDuplicateRow = true;
66 }
67 }
68 return isDuplicateRow;
69 }
70
71 public void flush() throws IOException {
72 int onDiskDataSize = 0;
73 if (startOffset >= 0) {
74 onDiskDataSize = out.size() - startOffset;
75 }
76
77 out.writeInt(rowsOffsetBAOS.size() >> 2);
78 if (rowsOffsetBAOS.size() > 0) {
79 out.write(rowsOffsetBAOS.getBuffer(), 0, rowsOffsetBAOS.size());
80 }
81 out.writeInt(onDiskDataSize);
82 if (LOG.isTraceEnabled()) {
83 LOG.trace("RowNumber: " + (rowsOffsetBAOS.size() >> 2)
84 + ", onDiskDataSize: " + onDiskDataSize + ", totalOnDiskSize: "
85 + (out.size() - startOffset));
86 }
87 }
88
89 }