/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSHDFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;

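/**
 * View to an on-disk region. Encapsulates the region layout on the {@link FileSystem}:
 * the region and family directories, the temporary, split and merge working directories,
 * the .regioninfo file, and the store files themselves.
 */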
@InterfaceAudience.Private
public class HRegionFileSystem {
  private static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);

  /** Name of the region info file that resides just under the region directory. */
  public final static String REGION_INFO_FILE = ".regioninfo";

  /** Temporary subdirectory of the region directory used for merges. */
  public static final String REGION_MERGES_DIR = ".merges";

  /** Temporary subdirectory of the region directory used for splits. */
  public static final String REGION_SPLITS_DIR = ".splits";

  /** Temporary subdirectory of the region directory used for temporary file creation. */
  private static final String REGION_TEMP_DIR = ".tmp";

  private final HRegionInfo regionInfo;
  // region info to use when addressing files on the filesystem
  private final HRegionInfo regionInfoForFs;
  private final Configuration conf;
  private final Path tableDir;
  private final FileSystem fs;

  // Number of retries, and base sleep between retries, used to ride over transient
  // HDFS failures when creating, renaming or deleting directories.
  private final int hdfsClientRetriesNumber;
  private final int baseSleepBeforeRetries;
  private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
  private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;

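  /**
   * Create a view to the on-disk region.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} that contains the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region
   */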
  HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
      final HRegionInfo regionInfo) {
    this.fs = fs;
    this.conf = conf;
    this.tableDir = tableDir;
    this.regionInfo = regionInfo;
    this.regionInfoForFs = ServerRegionReplicaUtil.getRegionInfoForFs(regionInfo);
    this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    this.baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
  }

  /** @return the underlying {@link FileSystem} */
  public FileSystem getFileSystem() {
    return this.fs;
  }

  /** @return the {@link HRegionInfo} that describes this on-disk region view */
  public HRegionInfo getRegionInfo() {
    return this.regionInfo;
  }

  /** @return the {@link HRegionInfo} used when addressing files on the filesystem */
  public HRegionInfo getRegionInfoForFS() {
    return this.regionInfoForFs;
  }

  /** @return {@link Path} to the table directory that contains this region */
  public Path getTableDir() {
    return this.tableDir;
  }

  /** @return {@link Path} to the region directory */
  public Path getRegionDir() {
    return new Path(this.tableDir, this.regionInfoForFs.getEncodedName());
  }

  // ===========================================================================
  //  Temp Helpers
  // ===========================================================================
  /** @return {@link Path} to the region's temp directory, used for file creations */
  Path getTempDir() {
    return new Path(getRegionDir(), REGION_TEMP_DIR);
  }

  /**
   * Clean up any temp detritus that may have been left around from previous operation attempts.
   */
  void cleanupTempDir() throws IOException {
    deleteDir(getTempDir());
  }

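  // ===========================================================================
  //  Store/StoreFile Helpers
  // ===========================================================================
  /**
   * Returns the directory path of the specified family.
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   */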
  public Path getStoreDir(final String familyName) {
    return new Path(this.getRegionDir(), familyName);
  }

  /**
   * Create the store directory for the specified family name.
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   * @throws IOException if the directory creation fails
   */
  Path createStoreDir(final String familyName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if (!fs.exists(storeDir) && !createDir(storeDir)) {
      throw new IOException("Failed creating " + storeDir);
    }
    return storeDir;
  }

  /**
   * Returns the store files available for the family, filtering out invalid files.
   * @param familyName Column Family Name
   * @return a collection of {@link StoreFileInfo} for the specified family, or null if none found
   */
  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
    return getStoreFiles(Bytes.toString(familyName));
  }

  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
    return getStoreFiles(familyName, true);
  }

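  /**
   * Returns the store files available for the family.
   * @param familyName Column Family Name
   * @param validate if true, skip files that are not valid store files
   * @return a collection of {@link StoreFileInfo} for the specified family, or null if the
   *   family directory is missing or empty
   */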
  public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
      throws IOException {
    Path familyDir = getStoreDir(familyName);
    FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
    if (files == null) {
      LOG.debug("No StoreFiles for: " + familyDir);
      return null;
    }

    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
    for (FileStatus status: files) {
      if (validate && !StoreFileInfo.isValid(status)) {
        LOG.warn("Invalid StoreFile: " + status.getPath());
        continue;
      }
      StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
        regionInfoForFs, familyName, status.getPath());
      storeFiles.add(info);
    }
    return storeFiles;
  }

  /**
   * Return the {@link Path} of the specified family/file, qualified against this region's
   * FileSystem. The file is not checked for existence.
   * @param familyName Column Family Name
   * @param fileName File Name
   * @return {@link Path} for the specified family/file
   */
  Path getStoreFilePath(final String familyName, final String fileName) {
    Path familyDir = getStoreDir(familyName);
    return new Path(familyDir, fileName).makeQualified(this.fs);
  }

  /**
   * Return the store file information of the specified family/file.
   * @param familyName Column Family Name
   * @param fileName File Name
   * @return the {@link StoreFileInfo} for the specified family/file
   */
  StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
      throws IOException {
    Path familyDir = getStoreDir(familyName);
    return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
      regionInfoForFs, familyName, new Path(familyDir, fileName));
  }

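  /**
   * Returns true if the specified family has reference files.
   * @param familyName Column Family Name
   * @return true if the family contains reference files
   * @throws IOException
   */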
  public boolean hasReferences(final String familyName) throws IOException {
    FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
        new FSUtils.ReferenceFileFilter(fs));
    return files != null && files.length > 0;
  }

  /**
   * Check whether the region has any reference files.
   * @param htd table descriptor of the region
   * @return true if any of the region's families contains a reference file
   * @throws IOException
   */
  public boolean hasReferences(final HTableDescriptor htd) throws IOException {
    for (HColumnDescriptor family : htd.getFamilies()) {
      if (hasReferences(family.getNameAsString())) {
        return true;
      }
    }
    return false;
  }

  /**
   * @return the set of families present on disk, or null if the region directory is empty
   * @throws IOException
   */
  public Collection<String> getFamilies() throws IOException {
    FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
    if (fds == null) return null;

    ArrayList<String> families = new ArrayList<String>(fds.length);
    for (FileStatus status: fds) {
      families.add(status.getPath().getName());
    }

    return families;
  }

  /**
   * Remove the region family from disk, archiving the store files.
   * @param familyName Column Family Name
   * @throws IOException if an error occurs during the archiving or deletion
   */
  public void deleteFamily(final String familyName) throws IOException {
    // archive the family store files
    HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));

    // delete the family folder
    Path familyDir = getStoreDir(familyName);
    if (fs.exists(familyDir) && !deleteDir(familyDir)) {
      throw new IOException("Could not delete family " + familyName
          + " from FileSystem for region " + regionInfoForFs.getRegionNameAsString() + "("
          + regionInfoForFs.getEncodedName() + ")");
    }
  }

  /**
   * Generate a unique store file name, built from a random UUID plus the optional suffix.
   * @param suffix extra information to append to the generated name, or null
   * @return a unique store file name
   */
  private static String generateUniqueName(final String suffix) {
    String name = UUID.randomUUID().toString().replaceAll("-", "");
    if (suffix != null) name += suffix;
    return name;
  }

  /**
   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
   * to get a safer file creation.
   * <pre>
   *   Path file = fs.createTempName();
   *   ...StoreFile.Writer(file)...
   *   fs.commitStoreFile("family", file);
   * </pre>
   * @return Unique {@link Path} of the temporary file
   */
  public Path createTempName() {
    return createTempName(null);
  }

  /**
   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
   * to get a safer file creation.
   * @param suffix extra information to append to the generated name
   * @return Unique {@link Path} of the temporary file
   */
  public Path createTempName(final String suffix) {
    return new Path(getTempDir(), generateUniqueName(suffix));
  }

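  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit
   * @return the new {@link Path} of the committed file
   * @throws IOException
   */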
  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
    return commitStoreFile(familyName, buildPath, -1, false);
  }

  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit
   * @param seqNum sequence number to append to the file name (ignored if negative)
   * @param generateNewName false if you want to keep the buildPath name
   * @return the new {@link Path} of the committed file
   * @throws IOException
   */
  private Path commitStoreFile(final String familyName, final Path buildPath,
      final long seqNum, final boolean generateNewName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if (!fs.exists(storeDir) && !createDir(storeDir)) {
      throw new IOException("Failed creating " + storeDir);
    }

    String name = buildPath.getName();
    if (generateNewName) {
      name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
    }
    Path dstPath = new Path(storeDir, name);
    if (!fs.exists(buildPath)) {
      throw new FileNotFoundException(buildPath.toString());
    }
    LOG.debug("Committing store file " + buildPath + " as " + dstPath);
    // buildPath exists, therefore not doing an exists() check before the rename.
    if (!rename(buildPath, dstPath)) {
      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
    }
    return dstPath;
  }

  /**
   * Move multiple store files to their family store directories.
   * @param storeFiles list of store files, keyed by family
   * @throws IOException
   */
  void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
    for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
      String familyName = Bytes.toString(es.getKey());
      for (StoreFile sf: es.getValue()) {
        commitStoreFile(familyName, sf.getPath());
      }
    }
  }

  /**
   * Archive the specified store file from the specified family.
   * @param familyName Family that contains the store file
   * @param filePath {@link Path} of the store file to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFile(final String familyName, final Path filePath)
      throws IOException {
    HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfoForFs,
        this.tableDir, Bytes.toBytes(familyName), filePath);
  }

  /**
   * Archive the specified store files from the specified family.
   * @param familyName Family that contains the store files
   * @param storeFiles set of store files to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
      throws IOException {
    HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfoForFs,
        this.tableDir, Bytes.toBytes(familyName), storeFiles);
  }

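  /**
   * Bulk load: add the specified store file to the specified family. If the source file is on a
   * different filesystem than the destination store, it is first copied to a temporary path on
   * the destination filesystem before being committed.
   * @param familyName Family that will gain the file
   * @param srcPath {@link Path} of the file to import
   * @param seqNum bulk load sequence number
   * @return the destination {@link Path} of the bulk loaded file
   * @throws IOException
   */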
  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
      throws IOException {
    FileSystem srcFs = srcPath.getFileSystem(conf);
    // Unwrap HFileSystem to compare against the backing filesystem.
    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

    // If the source file lives on a different HDFS than the destination store,
    // copy it to a temporary path on the destination filesystem first.
    if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
          "the destination store. Copying file over to destination filesystem.");
      Path tmpPath = createTempName();
      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
      LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
      srcPath = tmpPath;
    }

    return commitStoreFile(familyName, srcPath, seqNum, true);
  }

  // ===========================================================================
  //  Splits Helpers
  // ===========================================================================
  /** @return {@link Path} to the temp directory used during split operations */
  Path getSplitsDir() {
    return new Path(getRegionDir(), REGION_SPLITS_DIR);
  }

  Path getSplitsDir(final HRegionInfo hri) {
    return new Path(getSplitsDir(), hri.getEncodedName());
  }

  /**
   * Clean up any split detritus that may have been left around from previous split attempts.
   */
  void cleanupSplitsDir() throws IOException {
    deleteDir(getSplitsDir());
  }

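  /**
   * Clean up any split detritus that may have been left around from previous split attempts:
   * partially created daughter region directories and the splits directory itself.
   * @throws IOException
   */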
  void cleanupAnySplitDetritus() throws IOException {
    Path splitdir = this.getSplitsDir();
    if (!fs.exists(splitdir)) return;
    // The splits dir may contain the encoded names of the daughter regions we tried to create.
    // If a matching daughter directory exists under the table dir, the split did not complete,
    // so delete the partial daughter directories before removing the splits dir itself.
    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
    if (daughters != null) {
      for (FileStatus daughter: daughters) {
        Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
        if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
          throw new IOException("Failed delete of " + daughterDir);
        }
      }
    }
    cleanupSplitsDir();
    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
  }

  /**
   * Remove the daughter region directory.
   * @param regionInfo daughter {@link HRegionInfo}
   * @throws IOException
   */
  void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
    if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
      throw new IOException("Failed delete of " + regionDir);
    }
  }

  /**
   * Commit a daughter region, moving it from the split temporary directory
   * to the proper location in the filesystem.
   * @param regionInfo daughter {@link HRegionInfo}
   * @return {@link Path} to the daughter region directory
   * @throws IOException
   */
  Path commitDaughterRegion(final HRegionInfo regionInfo)
      throws IOException {
    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
    Path daughterTmpDir = this.getSplitsDir(regionInfo);

    if (fs.exists(daughterTmpDir)) {
      // Write out the .regioninfo file for the daughter before moving it into place.
      Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE);
      byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);

      // Move the daughter temp dir to the table dir.
      if (!rename(daughterTmpDir, regionDir)) {
        throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
      }
    }

    return regionDir;
  }

  /**
   * Create the region splits directory, deleting any leftover directory first.
   */
  void createSplitsDir() throws IOException {
    Path splitdir = getSplitsDir();
    if (fs.exists(splitdir)) {
      LOG.info("The " + splitdir + " directory exists. Hence deleting it to recreate it");
      if (!deleteDir(splitdir)) {
        throw new IOException("Failed deletion of " + splitdir + " before creating it again.");
      }
    }
    if (!createDir(splitdir)) {
      throw new IOException("Failed create of " + splitdir);
    }
  }

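  /**
   * Write out a split reference for the given store file, unless the split row falls outside the
   * file's key range, in which case the daughter does not need a reference and null is returned.
   * @param hri {@link HRegionInfo} of the daughter region
   * @param familyName Column Family Name
   * @param f the store file to split
   * @param splitRow the split row
   * @param top true if the reference should point at the top half of the file
   * @param splitPolicy the split policy; when it skips the store file range check, a reference
   *          is always written
   * @return {@link Path} to the created reference file, or null if no reference is needed
   * @throws IOException
   */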
  Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f,
      final byte[] splitRow, final boolean top, RegionSplitPolicy splitPolicy) throws IOException {

    if (splitPolicy == null || !splitPolicy.skipStoreFileRangeCheck(familyName)) {
      // Check whether the split row lies within the key range of the store file.
      // If it does not, the daughter does not need a reference to this file.
      try {
        if (top) {
          // The top half only needs a reference if the split row is not past the file's last key.
          KeyValue splitKey = KeyValueUtil.createFirstOnRow(splitRow);
          byte[] lastKey = f.getLastKey();
          // A null last key means the store file is empty.
          if (lastKey == null) {
            return null;
          }
          if (f.getComparator().compareFlatKey(splitKey.getBuffer(),
            splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
            return null;
          }
        } else {
          // The bottom half only needs a reference if the split row is not before the first key.
          KeyValue splitKey = KeyValueUtil.createLastOnRow(splitRow);
          byte[] firstKey = f.getFirstKey();
          // A null first key means the store file is empty.
          if (firstKey == null) {
            return null;
          }
          if (f.getComparator().compareFlatKey(splitKey.getBuffer(),
            splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
            return null;
          }
        }
      } finally {
        f.closeReader(f.getCacheConf() != null ? f.getCacheConf().shouldEvictOnClose() : true);
      }
    }

    Path splitDir = new Path(getSplitsDir(hri), familyName);
    // Create a reference to the top or bottom half of the store file, keyed at the split row.
    Reference r =
      top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
    // The reference file keeps the source file name, with the parent region's encoded name
    // appended as a dot-separated suffix, and is written into the daughter's family directory
    // under the splits dir.
    String parentRegionName = regionInfoForFs.getEncodedName();
    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
    return r.write(fs, p);
  }

  // ===========================================================================
  //  Merge Helpers
  // ===========================================================================
  /** @return {@link Path} to the temp directory used during merge operations */
  Path getMergesDir() {
    return new Path(getRegionDir(), REGION_MERGES_DIR);
  }

  Path getMergesDir(final HRegionInfo hri) {
    return new Path(getMergesDir(), hri.getEncodedName());
  }

  /**
   * Clean up any merge detritus that may have been left around from previous merge attempts.
   */
  void cleanupMergesDir() throws IOException {
    deleteDir(getMergesDir());
  }

  /**
   * Remove the merged region directory.
   * @param mergedRegion {@link HRegionInfo}
   * @throws IOException
   */
  void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
    Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
      throw new IOException("Failed delete of " + regionDir);
    }
  }

  /**
   * Create the region merges directory, deleting any leftover directory first.
   * @throws IOException
   */
  void createMergesDir() throws IOException {
    Path mergesdir = getMergesDir();
    if (fs.exists(mergesdir)) {
      LOG.info("The " + mergesdir + " directory exists. Hence deleting it to recreate it");
      if (!fs.delete(mergesdir, true)) {
        throw new IOException("Failed deletion of " + mergesdir + " before creating it again.");
      }
    }
    if (!fs.mkdirs(mergesdir)) {
      throw new IOException("Failed create of " + mergesdir);
    }
  }

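  /**
   * Write out a merge reference for the given store file under the merged region's family
   * directory in the merges temporary directory.
   * @param mergedRegion {@link HRegionInfo} of the merged region
   * @param familyName Column Family Name
   * @param f the store file to reference
   * @param mergedDir the temporary merge directory
   * @return {@link Path} to the created reference file
   * @throws IOException
   */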
  Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
      final StoreFile f, final Path mergedDir)
      throws IOException {
    Path referenceDir = new Path(new Path(mergedDir,
        mergedRegion.getEncodedName()), familyName);
    // A reference covering the whole store file, keyed at this region's start key.
    Reference r = Reference.createTopReference(regionInfoForFs.getStartKey());
    // The reference file keeps the source file name, with this (merging) region's encoded name
    // appended as a dot-separated suffix.
    String mergingRegionName = regionInfoForFs.getEncodedName();
    Path p = new Path(referenceDir, f.getPath().getName() + "."
        + mergingRegionName);
    return r.write(fs, p);
  }

  /**
   * Commit a merged region, moving it from the merges temporary directory to
   * the proper location in the table directory.
   * @param mergedRegionInfo merged region {@link HRegionInfo}
   * @throws IOException
   */
  void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
    Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
    Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
    // Move the tmp dir to the expected location.
    if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
      if (!fs.rename(mergedRegionTmpDir, regionDir)) {
        throw new IOException("Unable to rename " + mergedRegionTmpDir + " to "
            + regionDir);
      }
    }
  }

  /**
   * Log the current state of the region's directory tree.
   * @param LOG log to output information to
   * @throws IOException if an unexpected exception occurs
   */
  void logFileSystemState(final Log LOG) throws IOException {
    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
  }

  /**
   * @param hri the {@link HRegionInfo} to serialize
   * @return the content to write out to the .regioninfo file
   * @throws IOException
   */
  private static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException {
    return hri.toDelimitedByteArray();
  }

  /**
   * Create an {@link HRegionInfo} from the serialized version on disk.
   * @param fs {@link FileSystem} that contains the region info file
   * @param regionDir {@link Path} to the region directory that contains the info file
   * @return an {@link HRegionInfo} instance read from the region info file
   * @throws IOException if an error occurs during the open/read operation
   */
  public static HRegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
      throws IOException {
    FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
    try {
      return HRegionInfo.parseFrom(in);
    } finally {
      in.close();
    }
  }

  /**
   * Write out the .regioninfo file content to the given path.
   */
  private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
      final Path regionInfoFile, final byte[] content) throws IOException {
    // First get the permissions to use for the file.
    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    // Then write the region info file content.
    FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null);
    try {
      out.write(content);
    } finally {
      out.close();
    }
  }

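  /**
   * Check that the .regioninfo file under the region directory is up to date, rewriting it
   * (via the temp directory) if it is missing or its length does not match the expected content.
   * @throws IOException if we fail to rewrite the file
   */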
  void checkRegionInfoOnFilesystem() throws IOException {
    // Compose the expected content of the .regioninfo file so its length can be compared with
    // what is currently on the filesystem; if the lengths differ, the file is rewritten.
    byte[] content = getRegionInfoFileContent(regionInfoForFs);
    try {
      Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);

      FileStatus status = fs.getFileStatus(regionInfoFile);
      if (status != null && status.getLen() == content.length) {
        // The existing file matches the expected length; assume the content is good and move on.
        return;
      }

      LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
      if (!fs.delete(regionInfoFile, false)) {
        throw new IOException("Unable to remove existing " + regionInfoFile);
      }
    } catch (FileNotFoundException e) {
      LOG.warn(REGION_INFO_FILE + " file not found for region: " + regionInfoForFs.getEncodedName() +
          " on table " + regionInfo.getTable());
    }

    // Write the new .regioninfo file, going through the temp directory.
    writeRegionInfoOnFilesystem(content, true);
  }

  /**
   * Write out the .regioninfo file to the filesystem.
   * @param useTempDir whether to write through the region .tmp dir for a safer file creation
   */
  private void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException {
    byte[] content = getRegionInfoFileContent(regionInfoForFs);
    writeRegionInfoOnFilesystem(content, useTempDir);
  }

  /**
   * Write out the .regioninfo file to the filesystem.
   * @param regionInfoContent serialized version of the {@link HRegionInfo}
   * @param useTempDir whether to write through the region .tmp dir for a safer file creation
   */
  private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent,
      final boolean useTempDir) throws IOException {
    Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
    if (useTempDir) {
      // Create the file under the region .tmp directory and then move it into place, so a crash
      // between create and close does not leave a half-written .regioninfo file behind.
      Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);

      // Delete any leftover temp file from a previous failed attempt before writing again.
      if (FSUtils.isExists(fs, tmpPath)) {
        FSUtils.delete(fs, tmpPath, true);
      }

      // Write the region info content to the temp file.
      writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);

      // Move the created file to its final location.
      if (fs.exists(tmpPath) && !rename(tmpPath, regionInfoFile)) {
        throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
      }
    } else {
      // Write the region info content directly to the final location.
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
    }
  }

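  // ===========================================================================
  //  Create/Open/Delete Helpers
  // ===========================================================================
  /**
   * Create a new region on the filesystem.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} on which to create the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be added
   * @throws IOException if the region creation fails due to a FileSystem exception
   */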
  public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (fs.exists(regionDir)) {
      LOG.warn("Trying to create a region that already exists on disk: " + regionDir);
      throw new IOException("The specified region already exists on disk: " + regionDir);
    }

    // Create the region directory.
    if (!createDirOnFileSystem(fs, conf, regionDir)) {
      LOG.warn("Unable to create the region directory: " + regionDir);
      throw new IOException("Unable to create region directory: " + regionDir);
    }

    // Write out the .regioninfo file for the new region.
    regionFs.writeRegionInfoOnFilesystem(false);
    return regionFs;
  }

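  /**
   * Open a region from the filesystem.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} that contains the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be opened
   * @param readOnly true if you don't want to edit the region data
   * @throws IOException if the region does not exist or a FileSystem exception occurs
   */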
  public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
      throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (!fs.exists(regionDir)) {
      LOG.warn("Trying to open a region that does not exist on disk: " + regionDir);
      throw new IOException("The specified region does not exist on disk: " + regionDir);
    }

    if (!readOnly) {
      // Cleanup temporary directories.
      regionFs.cleanupTempDir();
      regionFs.cleanupSplitsDir();
      regionFs.cleanupMergesDir();

      // Make sure the .regioninfo file is present and up to date.
      regionFs.checkRegionInfoOnFilesystem();
    }

    return regionFs;
  }

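  /**
   * Remove the region from the table directory, archiving the region's hfiles.
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} from which to remove the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for the region to be deleted
   * @throws IOException if the request cannot be completed
   */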
  public static void deleteRegionFromFileSystem(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (!fs.exists(regionDir)) {
      LOG.warn("Trying to delete a region that does not exist on disk: " + regionDir);
      return;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETING region " + regionDir);
    }

    // Archive the region's files.
    Path rootDir = FSUtils.getRootDir(conf);
    HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

    // Delete the now-empty region directory.
    if (!fs.delete(regionDir, true)) {
      LOG.warn("Failed delete of " + regionDir);
    }
  }

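  /**
   * Creates a directory, retrying on transient IOExceptions. Assumes the caller has already
   * checked for the directory's existence.
   * @param dir directory to create
   * @return the result of fs.mkdirs(); if the underlying fs throws an IOException, returns true
   *         when the directory turns out to exist anyway
   * @throws IOException when retries are exhausted
   */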
  boolean createDir(Path dir) throws IOException {
    int i = 0;
    IOException lastIOE = null;
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // directory is present
        try {
          sleepBeforeRetry("Create Directory", i + 1);
        } catch (InterruptedException e) {
          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir", lastIOE);
  }

  /**
   * Renames a path, retrying on transient IOExceptions.
   * @param srcpath source path
   * @param dstPath destination path
   * @return true if the rename is successful
   * @throws IOException when retries are exhausted
   */
  boolean rename(Path srcpath, Path dstPath) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
      try {
        return fs.rename(srcpath, dstPath);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (!fs.exists(srcpath) && fs.exists(dstPath)) return true; // successful move
        // source is still there, retry after some time.
        try {
          sleepBeforeRetry("Rename Directory", i + 1);
        } catch (InterruptedException e) {
          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);

    throw new IOException("Exception in rename", lastIOE);
  }

  /**
   * Deletes a directory, retrying on transient IOExceptions.
   * @param dir directory to delete
   * @return true if the directory is deleted
   * @throws IOException when retries are exhausted
   */
  boolean deleteDir(Path dir) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
      try {
        return fs.delete(dir, true);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (!fs.exists(dir)) return true;
        // dir is still there, retry deleting after some time.
        try {
          sleepBeforeRetry("Delete Directory", i + 1);
        } catch (InterruptedException e) {
          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);

    throw new IOException("Exception in DeleteDir", lastIOE);
  }

  /**
   * Sleeping logic; handles the interrupt exception.
   */
  private void sleepBeforeRetry(String msg, int sleepMultiplier) throws InterruptedException {
    sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber);
  }

  /**
   * Static version of createDir() that reads the retry settings from the given configuration.
   * Assumes the caller has already checked for the directory's existence.
   * @param fs filesystem to create the directory on
   * @param conf configuration holding the retry settings
   * @param dir directory to create
   * @return the result of fs.mkdirs(); if the underlying fs throws an IOException, returns true
   *         when the directory turns out to exist anyway
   * @throws IOException when retries are exhausted
   */
  private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
      throws IOException {
    int i = 0;
    IOException lastIOE = null;
    int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
      DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
      DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
    do {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) return true; // directory is present
        try {
          sleepBeforeRetry("Create Directory", i + 1, baseSleepBeforeRetries,
            hdfsClientRetriesNumber);
        } catch (InterruptedException e) {
          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
      }
    } while (++i <= hdfsClientRetriesNumber);

    throw new IOException("Exception in createDir", lastIOE);
  }

  /**
   * Sleeping logic for static methods; handles the interrupt exception. Kept as a static
   * version to avoid re-reading the retry settings.
   */
  private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
      int hdfsClientRetriesNumber) throws InterruptedException {
    if (sleepMultiplier > hdfsClientRetriesNumber) {
      LOG.debug(msg + ", retries exhausted");
      return;
    }
    LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + sleepMultiplier);
    Thread.sleep((long)baseSleepBeforeRetries * sleepMultiplier);
  }
}