1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import static org.apache.hadoop.hbase.util.HFileArchiveTestingUtil.assertArchiveEqualToOriginal;
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertFalse;
24  import static org.junit.Assert.assertTrue;
25  import static org.mockito.Mockito.doReturn;
26  import static org.mockito.Mockito.spy;
27  
28  import java.io.IOException;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.SortedMap;
32  import java.util.TreeMap;
33  
34  import org.apache.commons.logging.Log;
35  import org.apache.commons.logging.LogFactory;
36  import org.apache.hadoop.conf.Configuration;
37  import org.apache.hadoop.fs.FSDataOutputStream;
38  import org.apache.hadoop.fs.FileStatus;
39  import org.apache.hadoop.fs.FileSystem;
40  import org.apache.hadoop.fs.Path;
41  import org.apache.hadoop.hbase.ChoreService;
42  import org.apache.hadoop.hbase.CoordinatedStateManager;
43  import org.apache.hadoop.hbase.HBaseTestingUtility;
44  import org.apache.hadoop.hbase.HColumnDescriptor;
45  import org.apache.hadoop.hbase.HConstants;
46  import org.apache.hadoop.hbase.HRegionInfo;
47  import org.apache.hadoop.hbase.HTableDescriptor;
48  import org.apache.hadoop.hbase.MetaMockingUtil;
49  import org.apache.hadoop.hbase.NamespaceDescriptor;
50  import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
51  import org.apache.hadoop.hbase.ProcedureInfo;
52  import org.apache.hadoop.hbase.Server;
53  import org.apache.hadoop.hbase.ServerName;
54  import org.apache.hadoop.hbase.testclassification.SmallTests;
55  import org.apache.hadoop.hbase.TableDescriptors;
56  import org.apache.hadoop.hbase.TableName;
57  import org.apache.hadoop.hbase.client.ClusterConnection;
58  import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
59  import org.apache.hadoop.hbase.client.Result;
60  import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
61  import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
62  import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
63  import org.apache.hadoop.hbase.executor.ExecutorService;
64  import org.apache.hadoop.hbase.io.Reference;
65  import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
66  import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
67  import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
68  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
69  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
70  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
71  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
72  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
73  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
74  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
75  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
76  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
77  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
78  import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
79  import org.apache.hadoop.hbase.regionserver.HStore;
80  import org.apache.hadoop.hbase.util.Bytes;
81  import org.apache.hadoop.hbase.util.FSUtils;
82  import org.apache.hadoop.hbase.util.HFileArchiveUtil;
83  import org.apache.hadoop.hbase.util.Triple;
84  import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
85  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
86  import org.junit.Test;
87  import org.junit.experimental.categories.Category;
88  import org.mockito.Mockito;
89  import org.mockito.invocation.InvocationOnMock;
90  import org.mockito.stubbing.Answer;
91  
92  import com.google.protobuf.RpcController;
93  import com.google.protobuf.Service;
94  import com.google.protobuf.ServiceException;
95  
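/**
 * Tests for {@link CatalogJanitor}: verifies cleanup of split parent regions and archiving of
 * their store files, using mocked {@link Server} and {@link MasterServices} implementations.
 */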
96  @Category(SmallTests.class)
97  public class TestCatalogJanitor {
98    private static final Log LOG = LogFactory.getLog(TestCatalogJanitor.class);
99  
100   /**
101    * Pseudo server for the tests below.
102    * Be sure to call stop() on the way out, else it could leave a mess behind.
103    */
104   class MockServer implements Server {
105     private final ClusterConnection connection;
106     private final Configuration c;
107 
108     MockServer(final HBaseTestingUtility htu)
109     throws NotAllMetaRegionsOnlineException, IOException, InterruptedException {
110       this.c = htu.getConfiguration();
111       ClientProtos.ClientService.BlockingInterface ri =
112         Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
113       MutateResponse.Builder builder = MutateResponse.newBuilder();
114       builder.setProcessed(true);
115       try {
116         Mockito.when(ri.mutate(
117           (RpcController)Mockito.any(), (MutateRequest)Mockito.any())).
118             thenReturn(builder.build());
119       } catch (ServiceException se) {
120         throw ProtobufUtil.getRemoteException(se);
121       }
122       try {
123         Mockito.when(ri.multi(
124           (RpcController)Mockito.any(), (MultiRequest)Mockito.any())).
125             thenAnswer(new Answer<MultiResponse>() {
126               @Override
127               public MultiResponse answer(InvocationOnMock invocation) throws Throwable {
128                 return buildMultiResponse( (MultiRequest)invocation.getArguments()[1]);
129               }
130             });
131       } catch (ServiceException se) {
132         throw ProtobufUtil.getRemoteException(se);
133       }
134       // Mock an HConnection and an AdminProtocol implementation.  Have the
135       // HConnection return the mocked client interface, which in turn returns a few
136       // mocked-up responses to make our test work.
137       this.connection =
138         HConnectionTestingUtility.getMockedConnectionAndDecorate(this.c,
139           Mockito.mock(AdminProtos.AdminService.BlockingInterface.class), ri,
140             ServerName.valueOf("example.org,12345,6789"),
141           HRegionInfo.FIRST_META_REGIONINFO);
142       // Set hbase.rootdir into test dir.
143       FileSystem fs = FileSystem.get(this.c);
144       Path rootdir = FSUtils.getRootDir(this.c);
145       FSUtils.setRootDir(this.c, rootdir);
146       AdminProtos.AdminService.BlockingInterface hri =
147         Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
148     }
149 
150     @Override
151     public ClusterConnection getConnection() {
152       return this.connection;
153     }
154 
155     @Override
156     public MetaTableLocator getMetaTableLocator() {
157       return null;
158     }
159 
160     @Override
161     public Configuration getConfiguration() {
162       return this.c;
163     }
164 
165     @Override
166     public ServerName getServerName() {
167       return ServerName.valueOf("mockserver.example.org", 1234, -1L);
168     }
169 
170     @Override
171     public ZooKeeperWatcher getZooKeeper() {
172       return null;
173     }
174 
175     @Override
176     public CoordinatedStateManager getCoordinatedStateManager() {
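      // Hand back a mocked coordination chain (manager -> SplitLogManagerCoordination -> details)
      // so callers that navigate getSplitLogManagerCoordination().getDetails() do not hit nulls.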
177       BaseCoordinatedStateManager m = Mockito.mock(BaseCoordinatedStateManager.class);
178       SplitLogManagerCoordination c = Mockito.mock(SplitLogManagerCoordination.class);
179       Mockito.when(m.getSplitLogManagerCoordination()).thenReturn(c);
180       SplitLogManagerDetails d = Mockito.mock(SplitLogManagerDetails.class);
181       Mockito.when(c.getDetails()).thenReturn(d);
182       return m;
183     }
184 
185     @Override
186     public void abort(String why, Throwable e) {
187       //no-op
188     }
189 
190     @Override
191     public boolean isAborted() {
192       return false;
193     }
194 
195     @Override
196     public boolean isStopped() {
197       return false;
198     }
199 
200     @Override
201     public void stop(String why) {
202     }
203 
204     @Override
205     public ChoreService getChoreService() {
206       return null;
207     }
208   }
209 
210   /**
211    * Mock MasterServices for tests below.
212    */
213   class MockMasterServices implements MasterServices {
214     private final MasterFileSystem mfs;
215     private final AssignmentManager asm;
216 
217     MockMasterServices(final Server server) throws IOException {
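      // Back this MasterServices with a real MasterFileSystem and a mocked AssignmentManager;
      // the remaining methods below are stubs.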
218       this.mfs = new MasterFileSystem(server, this);
219       this.asm = Mockito.mock(AssignmentManager.class);
220     }
221 
222     @Override
223     public void checkTableModifiable(TableName tableName) throws IOException {
224       //no-op
225     }
226 
227     @Override
228     public long createTable(
229         final HTableDescriptor desc,
230         final byte[][] splitKeys,
231         final long nonceGroup,
232         final long nonce) throws IOException {
233       // no-op
234       return -1;
235     }
236 
237     @Override
238     public AssignmentManager getAssignmentManager() {
239       return this.asm;
240     }
241 
242     @Override
243     public ExecutorService getExecutorService() {
244       return null;
245     }
246 
247     @Override
248     public ChoreService getChoreService() {
249       return null;
250     }
251 
252     @Override
253     public MasterFileSystem getMasterFileSystem() {
254       return this.mfs;
255     }
256 
257     @Override
258     public MasterCoprocessorHost getMasterCoprocessorHost() {
259       return null;
260     }
261 
262     @Override
263     public MasterQuotaManager getMasterQuotaManager() {
264       return null;
265     }
266 
267     @Override
268     public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
269       return null;
270     }
271 
272     @Override
273     public ServerManager getServerManager() {
274       return null;
275     }
276 
277     @Override
278     public ZooKeeperWatcher getZooKeeper() {
279       return null;
280     }
281 
282     @Override
283     public CoordinatedStateManager getCoordinatedStateManager() {
284       return null;
285     }
286 
287     @Override
288     public MetaTableLocator getMetaTableLocator() {
289       return null;
290     }
291 
292     @Override
293     public ClusterConnection getConnection() {
294       return null;
295     }
296 
297     @Override
298     public Configuration getConfiguration() {
299       return mfs.conf;
300     }
301 
302     @Override
303     public ServerName getServerName() {
304       return null;
305     }
306 
307     @Override
308     public void abort(String why, Throwable e) {
309       //no-op
310     }
311 
312     @Override
313     public boolean isAborted() {
314       return false;
315     }
316 
317     private boolean stopped = false;
318 
319     @Override
320     public void stop(String why) {
321       stopped = true;
322     }
323 
324     @Override
325     public boolean isStopped() {
326       return stopped;
327     }
328 
329     @Override
330     public TableDescriptors getTableDescriptors() {
331       return new TableDescriptors() {
332         @Override
333         public HTableDescriptor remove(TableName tablename) throws IOException {
334           // no-op
335           return null;
336         }
337 
338         @Override
339         public Map<String, HTableDescriptor> getAll() throws IOException {
340           // no-op
341           return null;
342         }
343 
344         @Override
345         public HTableDescriptor get(TableName tablename)
346         throws IOException {
347           return createHTableDescriptor();
348         }
349 
350         @Override
351         public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
352           return null;
353         }
354 
355         @Override
356         public void add(HTableDescriptor htd) throws IOException {
357           // no-op
358 
359         }
360         @Override
361         public void setCacheOn() throws IOException {
362         }
363 
364         @Override
365         public void setCacheOff() throws IOException {
366         }
367       };
368     }
369 
370     @Override
371     public boolean isServerCrashProcessingEnabled() {
372       return true;
373     }
374 
375     @Override
376     public boolean registerService(Service instance) {
377       return false;
378     }
379 
380     @Override
381     public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
382       // no-op
383     }
384 
385     @Override
386     public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
387       // no-op
388     }
389 
390     @Override
391     public void deleteNamespace(String name) throws IOException {
392       // no-op
393     }
394 
395     @Override
396     public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
397       return null;
398     }
399 
400     @Override
401     public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
402       return null;
403     }
404 
405     @Override
406     public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
407         throws IOException {
408       return false;
409     }
410 
411     @Override
412     public List<ProcedureInfo> listProcedures() throws IOException {
413       return null;
414     }
415 
416     @Override
417     public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
418       return null;
419     }
420 
421     @Override
422     public List<TableName> listTableNamesByNamespace(String name) throws IOException {
423       return null;
424     }
425 
426     @Override
427     public long deleteTable(
428         final TableName tableName,
429         final long nonceGroup,
430         final long nonce) throws IOException {
431       return -1;
432     }
433 
434     public void truncateTable(
435         final TableName tableName,
436         final boolean preserveSplits,
437         final long nonceGroup,
438         final long nonce) throws IOException {
439     }
440 
441     @Override
442     public void modifyTable(
443         final TableName tableName,
444         final HTableDescriptor descriptor,
445         final long nonceGroup,
446         final long nonce) throws IOException {
447     }
448 
449     @Override
450     public long enableTable(
451         final TableName tableName,
452         final long nonceGroup,
453         final long nonce) throws IOException {
454       return -1;
455     }
456 
457     @Override
458     public long disableTable(
459         TableName tableName,
460         final long nonceGroup,
461         final long nonce) throws IOException {
462       return -1;
463     }
464 
465     @Override
466     public void addColumn(
467         final TableName tableName,
468         final HColumnDescriptor columnDescriptor,
469         final long nonceGroup,
470         final long nonce) throws IOException { }
471 
472     @Override
473     public void modifyColumn(
474         final TableName tableName,
475         final HColumnDescriptor descriptor,
476         final long nonceGroup,
477         final long nonce) throws IOException { }
478 
479     @Override
480     public void deleteColumn(
481         final TableName tableName,
482         final byte[] columnName,
483         final long nonceGroup,
484         final long nonce) throws IOException { }
485 
486     @Override
487     public TableLockManager getTableLockManager() {
488       return null;
489     }
490 
491     @Override
492     public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
493         boolean forcible) throws IOException {
494     }
495 
496     @Override
497     public boolean isInitialized() {
498       // Auto-generated method stub
499       return false;
500     }
501 
502     @Override
503     public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
504       // Auto-generated method stub
505       return 0;
506     }
507 
508     @Override
509     public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
510       // Auto-generated method stub
511       return 0;
512     }
513   }
514 
515   @Test
516   public void testCleanParent() throws IOException, InterruptedException {
517     HBaseTestingUtility htu = new HBaseTestingUtility();
518     setRootDirAndCleanIt(htu, "testCleanParent");
519     Server server = new MockServer(htu);
520     try {
521       MasterServices services = new MockMasterServices(server);
522       CatalogJanitor janitor = new CatalogJanitor(server, services);
523       // Create regions.
524       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
525       htd.addFamily(new HColumnDescriptor("f"));
526       HRegionInfo parent =
527         new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
528             Bytes.toBytes("eee"));
529       HRegionInfo splita =
530         new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
531             Bytes.toBytes("ccc"));
532       HRegionInfo splitb =
533         new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
534             Bytes.toBytes("eee"));
535       // Test that when both daughter regions are in place, we do not
536       // remove the parent.
537       Result r = createResult(parent, splita, splitb);
538       // Add a reference under splita's directory so we don't clear out the parent.
539       Path rootdir = services.getMasterFileSystem().getRootDir();
540       Path tabledir =
541         FSUtils.getTableDir(rootdir, htd.getTableName());
542       Path storedir = HStore.getStoreHomedir(tabledir, splita,
543           htd.getColumnFamilies()[0].getName());
544       Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
545       long now = System.currentTimeMillis();
546       // Reference name has this format: StoreFile#REF_NAME_PARSER
547       Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
548       FileSystem fs = services.getMasterFileSystem().getFileSystem();
549       Path path = ref.write(fs, p);
550       assertTrue(fs.exists(path));
551       assertFalse(janitor.cleanParent(parent, r));
552       // Remove the reference file and try again.
553       assertTrue(fs.delete(p, true));
554       assertTrue(janitor.cleanParent(parent, r));
555     } finally {
556       server.stop("shutdown");
557     }
558   }
559 
560   /**
561    * Make sure parent gets cleaned up even if daughter is cleaned up before it.
562    * @throws IOException
563    * @throws InterruptedException
564    */
565   @Test
566   public void testParentCleanedEvenIfDaughterGoneFirst()
567   throws IOException, InterruptedException {
568     parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
569       "testParentCleanedEvenIfDaughterGoneFirst", Bytes.toBytes("eee"));
570   }
571 
572   /**
573    * Make sure last parent with empty end key gets cleaned up even if daughter is cleaned up before it.
574    * @throws IOException
575    * @throws InterruptedException
576    */
577   @Test
578   public void testLastParentCleanedEvenIfDaughterGoneFirst()
579   throws IOException, InterruptedException {
580     parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
581       "testLastParentCleanedEvenIfDaughterGoneFirst", new byte[0]);
582   }
583 
584   /**
585    * Make sure parent with specified end key gets cleaned up even if daughter is cleaned up before it.
586    *
587    * @param rootDir the test case name, used as the HBase testing utility root
588    * @param lastEndKey the end key of the split parent
589    * @throws IOException
590    * @throws InterruptedException
591    */
592   private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
593   final String rootDir, final byte[] lastEndKey)
594   throws IOException, InterruptedException {
595     HBaseTestingUtility htu = new HBaseTestingUtility();
596     setRootDirAndCleanIt(htu, rootDir);
597     Server server = new MockServer(htu);
598     MasterServices services = new MockMasterServices(server);
599     CatalogJanitor janitor = new CatalogJanitor(server, services);
600     final HTableDescriptor htd = createHTableDescriptor();
601 
602     // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
603 
604     // Parent
605     HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
606       lastEndKey);
607     // Sleep a second, else the encoded names on these regions come out the
608     // same for all regions with the same start key created in the same second.
609     Thread.sleep(1001);
610 
611     // Daughter a
612     HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
613       Bytes.toBytes("ccc"));
614     Thread.sleep(1001);
615     // Make daughters of daughter a; splitaa and splitab.
616     HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
617       Bytes.toBytes("bbb"));
618     HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"),
619       Bytes.toBytes("ccc"));
620 
621     // Daughter b
622     HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
623       lastEndKey);
624     Thread.sleep(1001);
625     // Make daughters of daughter b; splitba and splitbb.
626     HRegionInfo splitba = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
627       Bytes.toBytes("ddd"));
628     HRegionInfo splitbb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ddd"),
629       lastEndKey);
630 
631     // First test that our Comparator works right up in CatalogJanitor.
632     // Just for kicks.
633     SortedMap<HRegionInfo, Result> regions =
634       new TreeMap<HRegionInfo, Result>(new CatalogJanitor.SplitParentFirstComparator());
635     // Now make sure that this regions map sorts as we expect it to.
636     regions.put(parent, createResult(parent, splita, splitb));
637     regions.put(splitb, createResult(splitb, splitba, splitbb));
638     regions.put(splita, createResult(splita, splitaa, splitab));
639     // Assert it's properly sorted.
640     int index = 0;
641     for (Map.Entry<HRegionInfo, Result> e: regions.entrySet()) {
642       if (index == 0) {
643         assertTrue(e.getKey().getEncodedName().equals(parent.getEncodedName()));
644       } else if (index == 1) {
645         assertTrue(e.getKey().getEncodedName().equals(splita.getEncodedName()));
646       } else if (index == 2) {
647         assertTrue(e.getKey().getEncodedName().equals(splitb.getEncodedName()));
648       }
649       index++;
650     }
651 
652     // Now play around with the cleanParent function.  Create a ref from splita
653     // up to the parent.
654     Path splitaRef =
655       createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
656     // Make sure actual super parent sticks around because splita has a ref.
657     assertFalse(janitor.cleanParent(parent, regions.get(parent)));
658 
659     // splitba and splitbb do not have dirs in the fs.  That means that if
660     // we test splitb, it should get cleaned up.
661     assertTrue(janitor.cleanParent(splitb, regions.get(splitb)));
662 
663     // Now remove the ref from splita to the parent... so the parent can be let go and
664     // the daughter splita can be split (it can't split while it still has references).
665     // BUT make the timing such that the daughter gets cleaned up before we
666     // can get a chance to let go of the parent.
667     FileSystem fs = FileSystem.get(htu.getConfiguration());
668     assertTrue(fs.delete(splitaRef, true));
669     // Create the refs from daughters of splita.
670     Path splitaaRef =
671       createReferences(services, htd, splita, splitaa, Bytes.toBytes("bbb"), false);
672     Path splitabRef =
673       createReferences(services, htd, splita, splitab, Bytes.toBytes("bbb"), true);
674 
675     // Test splita.  It should stick around because of the references from splitaa and splitab.
676     assertFalse(janitor.cleanParent(splita, regions.get(splita)));
677 
678     // Now clean up the daughter splita first.  Remove the references from its daughters.
679     assertTrue(fs.delete(splitaaRef, true));
680     assertTrue(fs.delete(splitabRef, true));
681     assertTrue(janitor.cleanParent(splita, regions.get(splita)));
682 
683     // Super parent should get cleaned up now both splita and splitb are gone.
684     assertTrue(janitor.cleanParent(parent, regions.get(parent)));
685 
686     services.stop("test finished");
687     janitor.cancel(true);
688   }
689 
690   /**
691    * CatalogJanitor.scan() should not clean parent regions if their own
692    * parents are still referencing them. This ensures that grandfather regions
693    * do not point to deleted parent regions.
694    */
695   @Test
696   public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception {
697     HBaseTestingUtility htu = new HBaseTestingUtility();
698     setRootDirAndCleanIt(htu, "testScanDoesNotCleanRegionsWithExistingParents");
699     Server server = new MockServer(htu);
700     MasterServices services = new MockMasterServices(server);
701 
702     final HTableDescriptor htd = createHTableDescriptor();
703 
704     // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
705 
706     // Parent
707     HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
708       new byte[0], true);
709     // Sleep a second, else the encoded names on these regions come out the
710     // same for all regions with the same start key created in the same second.
711     Thread.sleep(1001);
712 
713     // Daughter a
714     HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
715       Bytes.toBytes("ccc"), true);
716     Thread.sleep(1001);
717     // Make daughters of daughter a; splitaa and splitab.
718     HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
719       Bytes.toBytes("bbb"), false);
720     HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"),
721       Bytes.toBytes("ccc"), false);
722 
723     // Daughter b
724     HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
725         new byte[0]);
726     Thread.sleep(1001);
727 
728     final Map<HRegionInfo, Result> splitParents =
729         new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
730     splitParents.put(parent, createResult(parent, splita, splitb));
731     splita.setOffline(true); //simulate that splita goes offline when it is split
732     splitParents.put(splita, createResult(splita, splitaa,splitab));
733 
734     final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
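    // Spy the janitor and stub getMergedRegionsAndSplitParents() so scan() sees exactly the
    // split parents built above and no merged regions.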
735     CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
736     doReturn(new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
737             10, mergedRegions, splitParents)).when(janitor)
738         .getMergedRegionsAndSplitParents();
739 
740     //create ref from splita to parent
741     Path splitaRef =
742         createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
743 
744     // Neither the parent nor splita should be removed
745     assertEquals(0, janitor.scan());
746 
747     //now delete the ref
748     FileSystem fs = FileSystem.get(htu.getConfiguration());
749     assertTrue(fs.delete(splitaRef, true));
750 
751     // Now both the parent and splita can be deleted
752     assertEquals(2, janitor.scan());
753 
754     services.stop("test finished");
755     janitor.cancel(true);
756   }
757 
758   /**
759    * Test that SplitParentFirstComparator sorts split parents ahead of their
760    * daughter regions.
761    */
762   @Test
763   public void testSplitParentFirstComparator() {
764     SplitParentFirstComparator comp = new SplitParentFirstComparator();
765     final HTableDescriptor htd = createHTableDescriptor();
766 
767     /*  Region splits:
768      *
769      *  rootRegion --- firstRegion --- firstRegiona
770      *              |               |- firstRegionb
771      *              |
772      *              |- lastRegion --- lastRegiona  --- lastRegionaa
773      *                             |                |- lastRegionab
774      *                             |- lastRegionb
775      *
776      *  rootRegion   :   []  - []
777      *  firstRegion  :   []  - bbb
778      *  lastRegion   :   bbb - []
779      *  firstRegiona :   []  - aaa
780      *  firstRegionb :   aaa - bbb
781      *  lastRegiona  :   bbb - ddd
782      *  lastRegionb  :   ddd - []
783      */
784 
785     // root region
786     HRegionInfo rootRegion = new HRegionInfo(htd.getTableName(),
787       HConstants.EMPTY_START_ROW,
788       HConstants.EMPTY_END_ROW, true);
789     HRegionInfo firstRegion = new HRegionInfo(htd.getTableName(),
790       HConstants.EMPTY_START_ROW,
791       Bytes.toBytes("bbb"), true);
792     HRegionInfo lastRegion = new HRegionInfo(htd.getTableName(),
793       Bytes.toBytes("bbb"),
794       HConstants.EMPTY_END_ROW, true);
795 
796     assertTrue(comp.compare(rootRegion, rootRegion) == 0);
797     assertTrue(comp.compare(firstRegion, firstRegion) == 0);
798     assertTrue(comp.compare(lastRegion, lastRegion) == 0);
799     assertTrue(comp.compare(rootRegion, firstRegion) < 0);
800     assertTrue(comp.compare(rootRegion, lastRegion) < 0);
801     assertTrue(comp.compare(firstRegion, lastRegion) < 0);
802 
803     //first region split into a, b
804     HRegionInfo firstRegiona = new HRegionInfo(htd.getTableName(),
805       HConstants.EMPTY_START_ROW,
806       Bytes.toBytes("aaa"), true);
807     HRegionInfo firstRegionb = new HRegionInfo(htd.getTableName(),
808         Bytes.toBytes("aaa"),
809       Bytes.toBytes("bbb"), true);
810     //last region split into a, b
811     HRegionInfo lastRegiona = new HRegionInfo(htd.getTableName(),
812       Bytes.toBytes("bbb"),
813       Bytes.toBytes("ddd"), true);
814     HRegionInfo lastRegionb = new HRegionInfo(htd.getTableName(),
815       Bytes.toBytes("ddd"),
816       HConstants.EMPTY_END_ROW, true);
817 
818     assertTrue(comp.compare(firstRegiona, firstRegiona) == 0);
819     assertTrue(comp.compare(firstRegionb, firstRegionb) == 0);
820     assertTrue(comp.compare(rootRegion, firstRegiona) < 0);
821     assertTrue(comp.compare(rootRegion, firstRegionb) < 0);
822     assertTrue(comp.compare(firstRegion, firstRegiona) < 0);
823     assertTrue(comp.compare(firstRegion, firstRegionb) < 0);
824     assertTrue(comp.compare(firstRegiona, firstRegionb) < 0);
825 
826     assertTrue(comp.compare(lastRegiona, lastRegiona) == 0);
827     assertTrue(comp.compare(lastRegionb, lastRegionb) == 0);
828     assertTrue(comp.compare(rootRegion, lastRegiona) < 0);
829     assertTrue(comp.compare(rootRegion, lastRegionb) < 0);
830     assertTrue(comp.compare(lastRegion, lastRegiona) < 0);
831     assertTrue(comp.compare(lastRegion, lastRegionb) < 0);
832     assertTrue(comp.compare(lastRegiona, lastRegionb) < 0);
833 
834     assertTrue(comp.compare(firstRegiona, lastRegiona) < 0);
835     assertTrue(comp.compare(firstRegiona, lastRegionb) < 0);
836     assertTrue(comp.compare(firstRegionb, lastRegiona) < 0);
837     assertTrue(comp.compare(firstRegionb, lastRegionb) < 0);
838 
839     HRegionInfo lastRegionaa = new HRegionInfo(htd.getTableName(),
840       Bytes.toBytes("bbb"),
841       Bytes.toBytes("ccc"), false);
842     HRegionInfo lastRegionab = new HRegionInfo(htd.getTableName(),
843       Bytes.toBytes("ccc"),
844       Bytes.toBytes("ddd"), false);
845 
846     assertTrue(comp.compare(lastRegiona, lastRegionaa) < 0);
847     assertTrue(comp.compare(lastRegiona, lastRegionab) < 0);
848     assertTrue(comp.compare(lastRegionaa, lastRegionab) < 0);
849 
850   }
851 
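  /**
   * Test that we correctly archive all the storefiles when a region is deleted.
   */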
852   @Test
853   public void testArchiveOldRegion() throws Exception {
854     String table = "table";
855     HBaseTestingUtility htu = new HBaseTestingUtility();
856     setRootDirAndCleanIt(htu, "testCleanParent");
857     Server server = new MockServer(htu);
858     MasterServices services = new MockMasterServices(server);
859 
860     // create the janitor
861     CatalogJanitor janitor = new CatalogJanitor(server, services);
862 
863     // Create regions.
864     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
865     htd.addFamily(new HColumnDescriptor("f"));
866     HRegionInfo parent = new HRegionInfo(htd.getTableName(),
867         Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
868     HRegionInfo splita = new HRegionInfo(htd.getTableName(),
869         Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
870     HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
871         Bytes.toBytes("ccc"),
872         Bytes.toBytes("eee"));
873 
874     // Test that when both daughter regions are in place, we do not
875     // remove the parent.
876     Result parentMetaRow = createResult(parent, splita, splitb);
877     FileSystem fs = FileSystem.get(htu.getConfiguration());
878     Path rootdir = services.getMasterFileSystem().getRootDir();
879     // Have to set the root directory since we use it in HFileDisposer to figure out how to get
880     // to the archive directory. Otherwise, it just seems to pick the first root directory it can
881     // find (so the single test passes, but when the full suite is run, things break).
882     FSUtils.setRootDir(fs.getConf(), rootdir);
883     Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
884     Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
885     Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
886       tabledir, htd.getColumnFamilies()[0].getName());
887     LOG.debug("Table dir:" + tabledir);
888     LOG.debug("Store dir:" + storedir);
889     LOG.debug("Store archive dir:" + storeArchive);
890 
891     // add a couple of store files that we can check for
892     FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
893     // get the current store files for comparison
894     FileStatus[] storeFiles = fs.listStatus(storedir);
895     int index = 0;
896     for (FileStatus file : storeFiles) {
897       LOG.debug("Have store file:" + file.getPath());
898       assertEquals("Got unexpected store file", mockFiles[index].getPath(),
899         storeFiles[index].getPath());
900       index++;
901     }
902 
903     // do the cleaning of the parent
904     assertTrue(janitor.cleanParent(parent, parentMetaRow));
905     LOG.debug("Finished cleanup of parent region");
906 
907     // and now check to make sure that the files have actually been archived
908     FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
909     logFiles("original store files", storeFiles);
910     logFiles("archived files", archivedStoreFiles);
911 
912     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
913 
914     // cleanup
915     FSUtils.delete(fs, rootdir, true);
916     services.stop("Test finished");
917     server.stop("Test finished");
918     janitor.cancel(true);
919   }
920 
921   /**
922    * @param description description of the files for logging
923    * @param storeFiles the status of the files to log
924    */
925   private void logFiles(String description, FileStatus[] storeFiles) {
926     LOG.debug("Current " + description + ": ");
927     for (FileStatus file : storeFiles) {
928       LOG.debug(file.getPath());
929     }
930   }
931 
932   /**
933    * Test that archiving a store file with the same name as one already archived causes the
934    * previously archived file to be moved to a timestamped backup.
935    */
936   @Test
937   public void testDuplicateHFileResolution() throws Exception {
938     String table = "table";
939     HBaseTestingUtility htu = new HBaseTestingUtility();
940     setRootDirAndCleanIt(htu, "testCleanParent");
941     Server server = new MockServer(htu);
942     MasterServices services = new MockMasterServices(server);
943 
944     // create the janitor
945 
946     CatalogJanitor janitor = new CatalogJanitor(server, services);
947 
948     // Create regions.
949     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
950     htd.addFamily(new HColumnDescriptor("f"));
951     HRegionInfo parent = new HRegionInfo(htd.getTableName(),
952         Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
953     HRegionInfo splita = new HRegionInfo(htd.getTableName(),
954         Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
955     HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
956         Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
957     // Test that when both daughter regions are in place, we do not
958     // remove the parent.
959     Result r = createResult(parent, splita, splitb);
960 
961     FileSystem fs = FileSystem.get(htu.getConfiguration());
962 
963     Path rootdir = services.getMasterFileSystem().getRootDir();
964     // Have to set the root directory since we use it in HFileDisposer to figure out how to get
965     // to the archive directory. Otherwise, it just seems to pick the first root directory it can
966     // find (so the single test passes, but when the full suite is run, things break).
967     FSUtils.setRootDir(fs.getConf(), rootdir);
968     Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
969     Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
970     LOG.debug("Old root:" + rootdir);
971     LOG.debug("Old table:" + tabledir);
972     LOG.debug("Old store:" + storedir);
973 
974     Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
975       tabledir, htd.getColumnFamilies()[0].getName());
976     LOG.debug("Old archive:" + storeArchive);
977 
978     // enable archiving, make sure that files get archived
979     addMockStoreFiles(2, services, storedir);
980     // get the current store files for comparison
981     FileStatus[] storeFiles = fs.listStatus(storedir);
982     // do the cleaning of the parent
983     assertTrue(janitor.cleanParent(parent, r));
984 
985     // and now check to make sure that the files have actually been archived
986     FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
987     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
988 
989     // now add store files with the same names as before to check that re-archiving
990     // moves the previously archived copies to timestamped backups
991     addMockStoreFiles(2, services, storedir);
992 
993     // do the cleaning of the parent
994     assertTrue(janitor.cleanParent(parent, r));
995 
996     // and now check to make sure that the files have actually been archived
997     archivedStoreFiles = fs.listStatus(storeArchive);
998     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
999 
1000     // cleanup
1001     services.stop("Test finished");
1002     server.stop("shutdown");
1003     janitor.cancel(true);
1004   }
1005 
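  /**
   * Create {@code count} dummy store files under {@code storedir} and return their FileStatus,
   * asserting that the directory then contains exactly that many files.
   */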
1006   private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
1007       throws IOException {
1008     // get the filesystem and make sure the store dir exists
1009     FileSystem fs = services.getMasterFileSystem().getFileSystem();
1010     fs.mkdirs(storedir);
1011     // create the store files in the parent
1012     for (int i = 0; i < count; i++) {
1013       Path storeFile = new Path(storedir, "_store" + i);
1014       FSDataOutputStream dos = fs.create(storeFile, true);
1015       dos.writeBytes("Some data: " + i);
1016       dos.close();
1017     }
1018     LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
1019     // make sure the mock store files are there
1020     FileStatus[] storeFiles = fs.listStatus(storedir);
1021     assertEquals("Didn't have expected store files", count, storeFiles.length);
1022     return storeFiles;
1023   }
1024 
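  /**
   * Point hbase.rootdir at a fresh test directory named {@code subdir}, deleting any previous
   * contents, and return the resulting root dir as a String.
   */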
1025   private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
1026       final String subdir)
1027   throws IOException {
1028     Path testdir = htu.getDataTestDir(subdir);
1029     FileSystem fs = FileSystem.get(htu.getConfiguration());
1030     if (fs.exists(testdir)) assertTrue(fs.delete(testdir, true));
1031     FSUtils.setRootDir(htu.getConfiguration(), testdir);
1032     return FSUtils.getRootDir(htu.getConfiguration()).toString();
1033   }
1034 
1035   /**
1036    * @param services Master services instance.
1037    * @param htd Table descriptor; its first column family is used for the store directory.
1038    * @param parent Parent region the reference refers back to.
1039    * @param daughter Daughter region under whose store directory the reference is written.
1040    * @param midkey Split key used when creating the reference.
1041    * @param top True if we are to write a 'top' reference.
1042    * @return Path to reference we created.
1043    * @throws IOException
1044    */
1045   private Path createReferences(final MasterServices services,
1046       final HTableDescriptor htd, final HRegionInfo parent,
1047       final HRegionInfo daughter, final byte [] midkey, final boolean top)
1048   throws IOException {
1049     Path rootdir = services.getMasterFileSystem().getRootDir();
1050     Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
1051     Path storedir = HStore.getStoreHomedir(tabledir, daughter,
1052       htd.getColumnFamilies()[0].getName());
1053     Reference ref =
1054       top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
1055     long now = System.currentTimeMillis();
1056     // Reference name has this format: StoreFile#REF_NAME_PARSER
1057     Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
1058     FileSystem fs = services.getMasterFileSystem().getFileSystem();
1059     ref.write(fs, p);
1060     return p;
1061   }
1062 
1063   private Result createResult(final HRegionInfo parent, final HRegionInfo a,
1064       final HRegionInfo b)
1065   throws IOException {
1066     return MetaMockingUtil.getMetaTableRowResult(parent, null, a, b);
1067   }
1068 
1069   private HTableDescriptor createHTableDescriptor() {
1070     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
1071     htd.addFamily(new HColumnDescriptor("f"));
1072     return htd;
1073   }
1074 
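  /**
   * Build a MultiResponse that answers every action in the request with an empty Result, so the
   * mocked multi() call above looks like a successful batch of mutations.
   */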
1075   private MultiResponse buildMultiResponse(MultiRequest req) {
1076     MultiResponse.Builder builder = MultiResponse.newBuilder();
1077     RegionActionResult.Builder regionActionResultBuilder =
1078         RegionActionResult.newBuilder();
1079     ResultOrException.Builder roeBuilder = ResultOrException.newBuilder();
1080     for (RegionAction regionAction: req.getRegionActionList()) {
1081       regionActionResultBuilder.clear();
1082       for (ClientProtos.Action action: regionAction.getActionList()) {
1083         roeBuilder.clear();
1084         roeBuilder.setResult(ClientProtos.Result.getDefaultInstance());
1085         roeBuilder.setIndex(action.getIndex());
1086         regionActionResultBuilder.addResultOrException(roeBuilder.build());
1087       }
1088       builder.addRegionActionResult(regionActionResultBuilder.build());
1089     }
1090     return builder.build();
1091   }
1092 
1093 }
1094