/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import junit.framework.TestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.After;
import org.junit.experimental.categories.Category;

import com.google.common.collect.Lists;

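/**
 * Tests the default (ratio-based) compaction file selection. Each case builds
 * a list of mock store files with the given sizes and asserts that
 * {@link RatioBasedCompactionPolicy} selects exactly the expected files.
 */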
@Category(SmallTests.class)
public class TestDefaultCompactSelection extends TestCase {
  private final static Log LOG = LogFactory.getLog(TestDefaultCompactSelection.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  protected Configuration conf;
  protected HStore store;
  private static final String DIR =
    TEST_UTIL.getDataTestDir(TestDefaultCompactSelection.class.getSimpleName()).toString();
  private static Path TEST_FILE;

  protected static final int minFiles = 3;
  protected static final int maxFiles = 5;

  protected static final long minSize = 10;
  protected static final long maxSize = 2100;

  private WALFactory wals;
  private HRegion region;

  @Override
  public void setUp() throws Exception {
    // set up config values necessary for the store
    this.conf = TEST_UTIL.getConfiguration();
    this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
    this.conf.setInt("hbase.hstore.compaction.min", minFiles);
    this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
    this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
    this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
    this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);

    // set up a store backed by a fresh region and WAL
    final String id = TestDefaultCompactSelection.class.getName();
    Path basedir = new Path(DIR);
    final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(id));
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
    FileSystem fs = FileSystem.get(conf);

    fs.delete(logdir, true);

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

    final Configuration walConf = new Configuration(conf);
    FSUtils.setRootDir(walConf, basedir);
    wals = new WALFactory(walConf, null, id);
    region = HRegion.createHRegion(info, basedir, conf, htd);
    HRegion.closeHRegion(region);
    Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
    region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf, info, htd,
        null);

    store = new HStore(region, hcd, conf);

    TEST_FILE = region.getRegionFileSystem().createTempName();
    fs.createNewFile(TEST_FILE);
  }

  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      wals.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }

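  // Helpers for building lists of mock store files with the given sizes/ages.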
  ArrayList<Long> toArrayList(long... numbers) {
    ArrayList<Long> result = new ArrayList<Long>();
    for (long i : numbers) {
      result.add(i);
    }
    return result;
  }

  List<StoreFile> sfCreate(long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<Long>();
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(toArrayList(sizes), ageInDisk);
  }

  List<StoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
      throws IOException {
    return sfCreate(false, sizes, ageInDisk);
  }

  List<StoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<Long>(sizes.length);
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(isReference, toArrayList(sizes), ageInDisk);
  }

  List<StoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
      throws IOException {
    List<StoreFile> ret = Lists.newArrayList();
    for (int i = 0; i < sizes.size(); i++) {
      ret.add(new MockStoreFile(TEST_UTIL, TEST_FILE,
          sizes.get(i), ageInDisk.get(i), isReference, i));
    }
    return ret;
  }

  long[] getSizes(List<StoreFile> sfList) {
    long[] aNums = new long[sfList.size()];
    for (int i = 0; i < sfList.size(); ++i) {
      aNums[i] = sfList.get(i).getReader().length();
    }
    return aNums;
  }

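  /**
   * Runs the store's compaction policy over the given candidates and asserts
   * that the selected files are exactly those with the expected sizes, in order.
   */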
  void compactEquals(List<StoreFile> candidates, long... expected)
      throws IOException {
    compactEquals(candidates, false, false, expected);
  }

  void compactEquals(List<StoreFile> candidates, boolean forcemajor, long... expected)
      throws IOException {
    compactEquals(candidates, forcemajor, false, expected);
  }

  void compactEquals(List<StoreFile> candidates, boolean forcemajor, boolean isOffPeak,
      long... expected) throws IOException {
    store.forceMajor = forcemajor;
    // test default compactions
    CompactionRequest result = ((RatioBasedCompactionPolicy)store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<StoreFile>(), false, isOffPeak, forcemajor);
    List<StoreFile> actual = new ArrayList<StoreFile>(result.getFiles());
    if (isOffPeak && !forcemajor) {
      assertTrue(result.isOffPeak());
    }
    assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
  }

  public void testCompactionRatio() throws IOException {
    /**
     * NOTE: these tests are tied to the implementation of the current
     * compaction algorithm.  They exist to ensure that refactoring does
     * not implicitly alter its behavior.
     */
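    // Selection sketch: the ratio-based policy walks the candidates from
    // oldest to newest, skipping a file while its size exceeds
    // max(minCompactSize, ratio * sum of the sizes of the newer files in the
    // window). With ratio 1.0 on {100, 50, 23, 12, 12}: 100 > 97 and 50 > 47
    // are skipped, but 23 <= 24, so {23, 12, 12} is selected.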
    long tooBig = maxSize + 1;

    // default case: preserve user ratio on size
    compactEquals(sfCreate(100, 50, 23, 12, 12), 23, 12, 12);
    // fewer files than the compact threshold = don't compact
    compactEquals(sfCreate(100, 50, 25, 12, 12) /* empty */);
    // files greater than the max compact size = skip those
    compactEquals(sfCreate(tooBig, tooBig, 700, 700, 700), 700, 700, 700);
    // big size + threshold
    compactEquals(sfCreate(tooBig, tooBig, 700, 700) /* empty */);
    // small files = don't care about ratio
    compactEquals(sfCreate(7, 1, 1), 7, 1, 1);

    // don't exceed the max files per compaction threshold
    // note: file selection starts with largest to smallest.
    compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);

    compactEquals(sfCreate(50, 10, 10, 10, 10), 10, 10, 10, 10);

    compactEquals(sfCreate(10, 10, 10, 10, 50), 10, 10, 10, 10);

    compactEquals(sfCreate(251, 253, 251, maxSize - 1), 251, 253, 251);

    compactEquals(sfCreate(maxSize - 1, maxSize - 1, maxSize - 1) /* empty */);

    // always try to compact something to get below the blocking storefile count
    this.conf.setLong("hbase.hstore.compaction.min.size", 1);
    store.storeEngine.getCompactionPolicy().setConf(conf);
    compactEquals(sfCreate(512, 256, 128, 64, 32, 16, 8, 4, 2, 1), 4, 2, 1);
    this.conf.setLong("hbase.hstore.compaction.min.size", minSize);
    store.storeEngine.getCompactionPolicy().setConf(conf);

    /* MAJOR COMPACTION */
    // if a major compaction has been forced, then compact everything
    compactEquals(sfCreate(50, 25, 12, 12), true, 50, 25, 12, 12);
    // also choose files < threshold on major compaction
    compactEquals(sfCreate(12, 12), true, 12, 12);
    // even if one of those files is too big
    compactEquals(sfCreate(tooBig, 12, 12), true, tooBig, 12, 12);
    // don't exceed the max files per compaction threshold, even with a major compaction
    store.forceMajor = true;
    compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);
    store.forceMajor = false;
    // if we exceed maxCompactSize, downgrade to minor
    // if not, it creates a 'snowball effect' when files >> maxCompactSize:
    // the last file in compaction is the aggregate of all previous compactions
    compactEquals(sfCreate(100, 50, 23, 12, 12), true, 23, 12, 12);
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
    store.storeEngine.getCompactionPolicy().setConf(conf);
    try {
      // trigger an aged major compaction
      compactEquals(sfCreate(50, 25, 12, 12), 50, 25, 12, 12);
      // make sure exceeding maxCompactSize also downgrades an aged major to a minor compaction
      compactEquals(sfCreate(100, 50, 23, 12, 12), 23, 12, 12);
    } finally {
      conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000 * 60 * 60 * 24);
      conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
    }

    /* REFERENCES == file is from a region that was split */
    // treat storefiles that have references like a major compaction
    compactEquals(sfCreate(true, 100, 50, 25, 12, 12), 100, 50, 25, 12, 12);
    // reference files shouldn't obey the max compact size threshold
    compactEquals(sfCreate(true, tooBig, 12, 12), tooBig, 12, 12);
    // reference files should still obey the max files per compaction limit, to avoid OOM
    compactEquals(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);

    // empty case
    compactEquals(new ArrayList<StoreFile>() /* empty */);
    // empty case (because all files are too big)
    compactEquals(sfCreate(tooBig, tooBig) /* empty */);
  }

  public void testOffPeakCompactionRatio() throws IOException {
    /*
     * NOTE: these tests are tied to the implementation of the current
     * compaction algorithm.  They exist to ensure that refactoring does
     * not implicitly alter its behavior.
     */
    // set an off-peak compaction threshold
    this.conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
    store.storeEngine.getCompactionPolicy().setConf(this.conf);
    // Test with and without the off-peak flag.
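    // With the off-peak ratio 5.0, 50 <= 5.0 * (12 + 12 + 1) keeps the 50 in
    // the selection; with the peak ratio 1.0, 50 > (12 + 12 + 1) drops it.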
    compactEquals(sfCreate(999, 50, 12, 12, 1), false, true, 50, 12, 12, 1);
    compactEquals(sfCreate(999, 50, 12, 12, 1), 12, 12, 1);
  }

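  /**
   * When the store's file count reaches the blocking threshold
   * (hbase.hstore.blockingStoreFiles: the ten-file cases below hit it, the
   * nine-file case does not), the store is considered stuck and the policy
   * compacts the smallest eligible set just to reduce the file count.
   */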
  public void testStuckStoreCompaction() throws IOException {
    // Select the smallest compaction if the store is stuck.
    compactEquals(sfCreate(99, 99, 99, 99, 99, 99, 30, 30, 30, 30), 30, 30, 30);
    // If not stuck, standard policy applies.
    compactEquals(sfCreate(99, 99, 99, 99, 99, 30, 30, 30, 30), 99, 30, 30, 30, 30);

    // Sufficiently small files are still added to the compaction, though
    compactEquals(sfCreate(99, 99, 99, 99, 99, 99, 30, 30, 30, 15), 30, 30, 30, 15);
    // Prefer the earlier compaction to the later one if the benefit is not significant
    compactEquals(sfCreate(99, 99, 99, 99, 30, 26, 26, 29, 25, 25), 30, 26, 26);
    // Prefer the later compaction if the benefit is significant.
    compactEquals(sfCreate(99, 99, 99, 99, 27, 27, 27, 20, 20, 20), 20, 20, 20);
  }

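  /**
   * A TTL-expired store file that contains no entries should not be selected
   * for compaction; rewriting it would only produce another empty file.
   */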
  public void testCompactionEmptyHFile() throws IOException {
    // Set TTL
    ScanInfo oldScanInfo = store.getScanInfo();
    ScanInfo newScanInfo = new ScanInfo(oldScanInfo.getConfiguration(), oldScanInfo.getFamily(),
        oldScanInfo.getMinVersions(), oldScanInfo.getMaxVersions(), 600,
        oldScanInfo.getKeepDeletedCells(), oldScanInfo.getTimeToPurgeDeletes(),
        oldScanInfo.getComparator());
    store.setScanInfo(newScanInfo);
    // Do not compact an empty store file
    List<StoreFile> candidates = sfCreate(0);
    for (StoreFile file : candidates) {
      if (file instanceof MockStoreFile) {
        MockStoreFile mockFile = (MockStoreFile) file;
        mockFile.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
        mockFile.setEntries(0);
      }
    }
    // Test default compactions
    CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine
        .getCompactionPolicy()).selectCompaction(candidates,
        new ArrayList<StoreFile>(), false, false, false);
    assertEquals(0, result.getFiles().size());
    store.setScanInfo(oldScanInfo);
  }
}