/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19  package org.apache.hadoop.hbase.regionserver.wal;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.EOFException;
24  import java.io.IOException;
25  import java.util.Iterator;
26  import java.util.List;
27  import java.util.UUID;
28  
29  import org.apache.commons.logging.Log;
30  import org.apache.commons.logging.LogFactory;
31  import org.apache.hadoop.hbase.classification.InterfaceAudience;
32  import org.apache.hadoop.hbase.HBaseInterfaceAudience;
33  import org.apache.hadoop.hbase.HRegionInfo;
34  import org.apache.hadoop.hbase.TableName;
35  import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
36  import org.apache.hadoop.hbase.util.Bytes;
37  import org.apache.hadoop.hbase.wal.WALKey;
38  import org.apache.hadoop.io.Writable;
39  import org.apache.hadoop.io.WritableUtils;
40  
41  /**
42   * A Key for an entry in the change log.
43   *
44   * The log intermingles edits to many tables and rows, so each log entry
45   * identifies the appropriate table and row.  Within a table and row, they're
46   * also sorted.
47   *
48   * <p>Some Transactional edits (START, COMMIT, ABORT) will not have an
49   * associated row.
50   * @deprecated use WALKey
51   */
52  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
53  @Deprecated
54  public class HLogKey extends WALKey implements Writable {
55    private static final Log LOG = LogFactory.getLog(HLogKey.class);
56  
57    public HLogKey() {
58      super();
59    }
60  
61    @InterfaceAudience.Private
62    public HLogKey(final byte[] encodedRegionName, final TableName tablename, long logSeqNum,
63        final long now, UUID clusterId) {
64      super(encodedRegionName, tablename, logSeqNum, now, clusterId);
65    }
66  
67    public HLogKey(final byte[] encodedRegionName, final TableName tablename) {
68      super(encodedRegionName, tablename);
69    }
70  
71    @InterfaceAudience.Private
72    public HLogKey(final byte[] encodedRegionName, final TableName tablename, final long now) {
73      super(encodedRegionName, tablename, now);
74    }
75  
76    public HLogKey(final byte[] encodedRegionName,
77                   final TableName tablename,
78                   final long now,
79                   final MultiVersionConcurrencyControl mvcc) {
80      super(encodedRegionName, tablename, now, mvcc);
81    }
82  
83    /**
84     * Create the log key for writing to somewhere.
85     * We maintain the tablename mainly for debugging purposes.
86     * A regionName is always a sub-table object.
87     * <p>Used by log splitting and snapshots.
88     *
89     * @param encodedRegionName Encoded name of the region as returned by
90     * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
91     * @param tablename   - name of table
92     * @param logSeqNum   - log sequence number
93     * @param now Time at which this edit was written.
94     * @param clusterIds the clusters that have consumed the change(used in Replication)
95     */
96    public HLogKey(
97        final byte[] encodedRegionName,
98        final TableName tablename,
99        long logSeqNum,
100       final long now,
101       List<UUID> clusterIds,
102       long nonceGroup,
103       long nonce,
104       MultiVersionConcurrencyControl mvcc) {
105     super(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce, mvcc);
106   }
107 
108   /**
109    * Create the log key for writing to somewhere.
110    * We maintain the tablename mainly for debugging purposes.
111    * A regionName is always a sub-table object.
112    *
113    * @param encodedRegionName Encoded name of the region as returned by
114    * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
115    * @param tablename
116    * @param now Time at which this edit was written.
117    * @param clusterIds the clusters that have consumed the change(used in Replication)
118    * @param nonceGroup
119    * @param nonce
120    */
121   public HLogKey(final byte[] encodedRegionName,
122                  final TableName tablename,
123                  final long now,
124                  List<UUID> clusterIds,
125                  long nonceGroup,
126                  long nonce,
127                  final MultiVersionConcurrencyControl mvcc) {
128     super(encodedRegionName, tablename, now, clusterIds, nonceGroup, nonce, mvcc);
129   }
130 
131   /**
132    * Create the log key for writing to somewhere.
133    * We maintain the tablename mainly for debugging purposes.
134    * A regionName is always a sub-table object.
135    *
136    * @param encodedRegionName Encoded name of the region as returned by
137    * <code>HRegionInfo#getEncodedNameAsBytes()</code>.
138    * @param tablename
139    * @param logSeqNum
140    * @param nonceGroup
141    * @param nonce
142    */
143   public HLogKey(final byte [] encodedRegionName, final TableName tablename, long logSeqNum,
144       long nonceGroup, long nonce, MultiVersionConcurrencyControl mvcc) {
145     super(encodedRegionName, tablename, logSeqNum, nonceGroup, nonce, mvcc);
146   }
147 
148   /**
149    * @deprecated Don't use these Writables methods. Use PB instead.
150    */
151   @Override
152   @Deprecated
153   public void write(DataOutput out) throws IOException {
154     LOG.warn("HLogKey is being serialized to writable - only expected in test code");
155     WritableUtils.writeVInt(out, VERSION.code);
156     if (compressionContext == null) {
157       Bytes.writeByteArray(out, this.encodedRegionName);
158       Bytes.writeByteArray(out, this.tablename.getName());
159     } else {
160       Compressor.writeCompressed(this.encodedRegionName, 0,
161           this.encodedRegionName.length, out,
162           compressionContext.regionDict);
163       Compressor.writeCompressed(this.tablename.getName(), 0,
164           this.tablename.getName().length, out,
165           compressionContext.tableDict);
166     }
167     out.writeLong(this.logSeqNum);
168     out.writeLong(this.writeTime);
169     // Don't need to write the clusters information as we are using protobufs from 0.95
170     // Writing only the first clusterId for testing the legacy read
171     Iterator<UUID> iterator = clusterIds.iterator();
172     if(iterator.hasNext()){
173       out.writeBoolean(true);
174       UUID clusterId = iterator.next();
175       out.writeLong(clusterId.getMostSignificantBits());
176       out.writeLong(clusterId.getLeastSignificantBits());
177     } else {
178       out.writeBoolean(false);
179     }
180   }
181 
182   @Override
183   public void readFields(DataInput in) throws IOException {
184     Version version = Version.UNVERSIONED;
185     // HLogKey was not versioned in the beginning.
186     // In order to introduce it now, we make use of the fact
187     // that encodedRegionName was written with Bytes.writeByteArray,
188     // which encodes the array length as a vint which is >= 0.
189     // Hence if the vint is >= 0 we have an old version and the vint
190     // encodes the length of encodedRegionName.
191     // If < 0 we just read the version and the next vint is the length.
192     // @see Bytes#readByteArray(DataInput)
193     setScopes(null); // writable HLogKey does not contain scopes
194     int len = WritableUtils.readVInt(in);
195     byte[] tablenameBytes = null;
196     if (len < 0) {
197       // what we just read was the version
198       version = Version.fromCode(len);
199       // We only compress V2 of WALkey.
200       // If compression is on, the length is handled by the dictionary
201       if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) {
202         len = WritableUtils.readVInt(in);
203       }
204     }
205     if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) {
206       this.encodedRegionName = new byte[len];
207       in.readFully(this.encodedRegionName);
208       tablenameBytes = Bytes.readByteArray(in);
209     } else {
210       this.encodedRegionName = Compressor.readCompressed(in, compressionContext.regionDict);
211       tablenameBytes = Compressor.readCompressed(in, compressionContext.tableDict);
212     }
213 
214     this.logSeqNum = in.readLong();
215     this.writeTime = in.readLong();
216 
217     this.clusterIds.clear();
218     if (version.atLeast(Version.INITIAL)) {
219       if (in.readBoolean()) {
220         // read the older log
221         // Definitely is the originating cluster
222         clusterIds.add(new UUID(in.readLong(), in.readLong()));
223       }
224     } else {
225       try {
226         // dummy read (former byte cluster id)
227         in.readByte();
228       } catch(EOFException e) {
229         // Means it's a very old key, just continue
230         if (LOG.isTraceEnabled()) LOG.trace(e);
231       }
232     }
233     try {
234       this.tablename = TableName.valueOf(tablenameBytes);
235     } catch (IllegalArgumentException iae) {
236       if (Bytes.toString(tablenameBytes).equals(TableName.OLD_META_STR)) {
237         // It is a pre-namespace meta table edit, continue with new format.
238         LOG.info("Got an old .META. edit, continuing with new format ");
239         this.tablename = TableName.META_TABLE_NAME;
240         this.encodedRegionName = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
241       } else if (Bytes.toString(tablenameBytes).equals(TableName.OLD_ROOT_STR)) {
242         this.tablename = TableName.OLD_ROOT_TABLE_NAME;
243          throw iae;
244       } else throw iae;
245     }
246     // Do not need to read the clusters information as we are using protobufs from 0.95
247   }
248 
249 }