View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  
19  package org.apache.hadoop.hbase.procedure2.util;
20  
21  import java.io.IOException;
22  import java.io.OutputStream;
23  import java.util.Arrays;
24  
25  import org.apache.hadoop.hbase.classification.InterfaceAudience;
26  import org.apache.hadoop.hbase.classification.InterfaceStability;
27  
28  /**
 * Similar to ByteArrayOutputStream, with the exception that we can prepend a header.
 * e.g. you write some data and you want to prepend a header that contains the data len or cksum.
31   * <code>
32   * ByteSlot slot = new ByteSlot();
33   * // write data
34   * slot.write(...);
35   * slot.write(...);
36   * // write header with the size of the written data
37   * slot.markHead();
38   * slot.write(Bytes.toBytes(slot.size()));
39   * // flush to stream as [header, data]
40   * slot.writeTo(stream);
41   * </code>
42   */
43  @InterfaceAudience.Private
44  @InterfaceStability.Evolving
public class ByteSlot extends OutputStream {
  /** Past this buffer length the buffer grows linearly instead of doubling. */
  private static final int LARGE_GROW_SIZE_THRESHOLD = 8 << 20;
  /** Linear growth step applied once the buffer is beyond the doubling threshold. */
  private static final int LARGE_GROW_SIZE = 1 << 20;
  /** Buffers longer than this are released on reset() rather than kept for reuse. */
  private static final int RESET_THRESHOLD = 64 << 20;
  /** Requested capacities are rounded up to a multiple of this value. */
  private static final int GROW_ALIGN = 128;

  // Backing array; lazily allocated on first write.
  private byte[] buf;
  // Offset where the header region begins (see markHead()).
  private int head;
  // Number of valid bytes written so far.
  private int size;

  /**
   * Empties the slot so it can be reused. A very large backing array is
   * dropped so its memory can be reclaimed; smaller arrays are retained
   * to avoid reallocation on the next round of writes.
   */
  public void reset() {
    final boolean releaseBuffer = buf != null && buf.length > RESET_THRESHOLD;
    if (releaseBuffer) {
      buf = null;
    }
    size = 0;
    head = 0;
  }

  /**
   * Marks the current write position as the start of the header region.
   * Bytes written after this call are emitted first by writeTo().
   */
  public void markHead() {
    head = size;
  }

  /** @return the offset at which the header region starts */
  public int getHead() {
    return head;
  }

  /** @return the number of bytes written so far */
  public int size() {
    return size;
  }

  /**
   * @return the internal buffer, shared (not copied); only the first
   *         size() bytes are valid.
   */
  public byte[] getBuffer() {
    return buf;
  }

  /**
   * Overwrites a single byte at the given offset, pulling the head marker
   * back to that offset if it currently sits past it.
   */
  public void writeAt(int offset, int b) {
    if (offset < head) {
      head = offset;
    }
    buf[offset] = (byte)b;
  }

  @Override
  public void write(int b) {
    ensureCapacity(size + 1);
    buf[size] = (byte)b;
    size += 1;
  }

  @Override
  public void write(byte[] b, int off, int len) {
    ensureCapacity(size + len);
    System.arraycopy(b, off, buf, size, len);
    size += len;
  }

  /**
   * Flushes the accumulated bytes to the given stream, emitting the header
   * region (everything written after markHead()) before the data region,
   * so the stream receives [header, data].
   */
  public void writeTo(final OutputStream stream) throws IOException {
    if (head == 0) {
      // No header marked: the buffer is written out as-is.
      stream.write(buf, 0, size);
      return;
    }
    stream.write(buf, head, size - head);
    stream.write(buf, 0, head);
  }

  /**
   * Grows the backing array, if needed, to hold at least minCapacity bytes.
   * Small buffers double in size; past LARGE_GROW_SIZE_THRESHOLD they grow
   * by a fixed LARGE_GROW_SIZE step to bound the over-allocation.
   */
  private void ensureCapacity(int minCapacity) {
    // Round the request up to the alignment boundary.
    final int aligned = (minCapacity + (GROW_ALIGN - 1)) & -GROW_ALIGN;
    if (buf == null) {
      buf = new byte[aligned];
    } else if (aligned > buf.length) {
      int newLength = buf.length <= LARGE_GROW_SIZE_THRESHOLD
          ? buf.length << 1
          : buf.length + LARGE_GROW_SIZE;
      if (newLength < aligned) {
        newLength = aligned;
      }
      buf = Arrays.copyOf(buf, newLength);
    }
  }
}